hugetlb: fix a hugepage reservation check for MAP_SHARED
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a2d29b84501f104227244b1a0fd9bad30da7467f..8c20aed62b9c350715db24004fa953cf5ebbd515 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -14,6 +14,8 @@
 #include <linux/mempolicy.h>
 #include <linux/cpuset.h>
 #include <linux/mutex.h>
+#include <linux/bootmem.h>
+#include <linux/sysfs.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include "internal.h"
 
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
-static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
-static unsigned long surplus_huge_pages;
-static unsigned long nr_overcommit_huge_pages;
-unsigned long max_huge_pages;
-unsigned long sysctl_overcommit_huge_pages;
-static struct list_head hugepage_freelists[MAX_NUMNODES];
-static unsigned int nr_huge_pages_node[MAX_NUMNODES];
-static unsigned int free_huge_pages_node[MAX_NUMNODES];
-static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-static int hugetlb_next_nid;
+
+static int max_hstate;
+unsigned int default_hstate_idx;
+struct hstate hstates[HUGE_MAX_HSTATE];
+
+__initdata LIST_HEAD(huge_boot_pages);
+
+/* for command line parsing */
+static struct hstate * __initdata parsed_hstate;
+static unsigned long __initdata default_hstate_max_huge_pages;
+static unsigned long __initdata default_hstate_size;
+
+#define for_each_hstate(h) \
+       for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
 static DEFINE_SPINLOCK(hugetlb_lock);
 
-#define HPAGE_RESV_OWNER    (1UL << (BITS_PER_LONG - 1))
-#define HPAGE_RESV_UNMAPPED (1UL << (BITS_PER_LONG - 2))
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ *                    across the pages in a mapping.
+ *
+ * The region data structures are protected by a combination of the mmap_sem
+ * and the hugetlb_instantiation_mutex.  To access or modify a region the
+ * caller must either hold the mmap_sem for write, or the mmap_sem for read
+ * and the hugetlb_instantiation_mutex:
+ *
+ *     down_write(&mm->mmap_sem);
+ * or
+ *     down_read(&mm->mmap_sem);
+ *     mutex_lock(&hugetlb_instantiation_mutex);
+ */
+struct file_region {
+       struct list_head link;
+       long from;
+       long to;
+};
+
+static long region_add(struct list_head *head, long f, long t)
+{
+       struct file_region *rg, *nrg, *trg;
+
+       /* Locate the region we are either in or before. */
+       list_for_each_entry(rg, head, link)
+               if (f <= rg->to)
+                       break;
+
+       /* Round our left edge to the current segment if it encloses us. */
+       if (f > rg->from)
+               f = rg->from;
+
+       /* Check for and consume any regions we now overlap with. */
+       nrg = rg;
+       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+               if (&rg->link == head)
+                       break;
+               if (rg->from > t)
+                       break;
+
+               /* If this area reaches higher than ours then extend our
+                * area to include it completely.  If this is not the
+                * first area which we intend to reuse, free it. */
+               if (rg->to > t)
+                       t = rg->to;
+               if (rg != nrg) {
+                       list_del(&rg->link);
+                       kfree(rg);
+               }
+       }
+       nrg->from = f;
+       nrg->to = t;
+       return 0;
+}
+
+static long region_chg(struct list_head *head, long f, long t)
+{
+       struct file_region *rg, *nrg;
+       long chg = 0;
+
+       /* Locate the region we are before or in. */
+       list_for_each_entry(rg, head, link)
+               if (f <= rg->to)
+                       break;
+
+       /* If we are below the current region then a new region is required.
+        * Subtle: allocate a new region at the position, but make it zero
+        * size so that we are guaranteed to be able to record the
+        * reservation. */
+       if (&rg->link == head || t < rg->from) {
+               nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+               if (!nrg)
+                       return -ENOMEM;
+               nrg->from = f;
+               nrg->to   = f;
+               INIT_LIST_HEAD(&nrg->link);
+               list_add(&nrg->link, rg->link.prev);
+
+               return t - f;
+       }
+
+       /* Round our left edge to the current segment if it encloses us. */
+       if (f > rg->from)
+               f = rg->from;
+       chg = t - f;
+
+       /* Check for and consume any regions we now overlap with. */
+       list_for_each_entry(rg, rg->link.prev, link) {
+               if (&rg->link == head)
+                       break;
+               if (rg->from > t)
+                       return chg;
+
+               /* We overlap with this area; if it extends further than
+                * we do then we must extend ourselves.  Account for its
+                * existing reservation. */
+               if (rg->to > t) {
+                       chg += rg->to - t;
+                       t = rg->to;
+               }
+               chg -= rg->to - rg->from;
+       }
+       return chg;
+}
+
+static long region_truncate(struct list_head *head, long end)
+{
+       struct file_region *rg, *trg;
+       long chg = 0;
+
+       /* Locate the region we are either in or before. */
+       list_for_each_entry(rg, head, link)
+               if (end <= rg->to)
+                       break;
+       if (&rg->link == head)
+               return 0;
+
+       /* If we are in the middle of a region then adjust it. */
+       if (end > rg->from) {
+               chg = rg->to - end;
+               rg->to = end;
+               rg = list_entry(rg->link.next, typeof(*rg), link);
+       }
+
+       /* Drop any remaining regions. */
+       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+               if (&rg->link == head)
+                       break;
+               chg += rg->to - rg->from;
+               list_del(&rg->link);
+               kfree(rg);
+       }
+       return chg;
+}
+
+static long region_count(struct list_head *head, long f, long t)
+{
+       struct file_region *rg;
+       long chg = 0;
+
+       /* Locate each segment we overlap with, and count that overlap. */
+       list_for_each_entry(rg, head, link) {
+               long seg_from;
+               long seg_to;
+
+               if (rg->to <= f)
+                       continue;
+               if (rg->from >= t)
+                       break;
+
+               seg_from = max(rg->from, f);
+               seg_to = min(rg->to, t);
+
+               chg += seg_to - seg_from;
+       }
+
+       return chg;
+}
+
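
The routines above form a two-phase API: region_chg() reports how many new huge pages a range would require and pre-allocates any bookkeeping node, so that a later region_add() over the same range cannot fail, and region_add() then commits the range into the map.  A minimal caller sketch (illustrative only, not part of the patch; the helper name is made up):

static long reserve_range_sketch(struct list_head *regions, long from, long to)
{
	long chg;

	/* Phase 1: how many huge pages does [from, to) still need?  This
	 * also pre-allocates bookkeeping so the commit below cannot fail. */
	chg = region_chg(regions, from, to);
	if (chg < 0)
		return chg;	/* -ENOMEM */

	/* ... charge quota and allocate 'chg' huge pages here ... */

	/* Phase 2: commit the range into the map. */
	region_add(regions, from, to);
	return chg;
}
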
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in pagecache page units; huge pages here.
+ */
+static pgoff_t vma_hugecache_offset(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long address)
+{
+       return ((address - vma->vm_start) >> huge_page_shift(h)) +
+                       (vma->vm_pgoff >> huge_page_order(h));
+}
+
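
For example, assuming 4KB base pages and a 2MB huge page size (huge_page_shift(h) == 21, huge_page_order(h) == 9): a fault at vma->vm_start + 4MB in a VMA with vm_pgoff == 1024 (i.e. mapped 4MB into the file) gives (4MB >> 21) + (1024 >> 9) = 2 + 2 = 4, the fifth huge page of the backing file.
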
+/*
+ * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
+ * bits of the reservation map pointer, which are always clear due to
+ * alignment.
+ */
+#define HPAGE_RESV_OWNER    (1UL << 0)
+#define HPAGE_RESV_UNMAPPED (1UL << 1)
 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
+
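
As an illustration (with a made-up pointer value): a kmalloc()ed map at, say, 0xffff880012345000 is at least word aligned, so its two low bits are guaranteed clear; storing it with HPAGE_RESV_OWNER set yields vm_private_data == 0xffff880012345001, and masking with ~HPAGE_RESV_MASK recovers the pointer while masking with HPAGE_RESV_MASK recovers the flags.
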
 /*
  * These helpers are used to track how many pages are reserved for
  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
@@ -52,58 +234,102 @@ static DEFINE_SPINLOCK(hugetlb_lock);
  * the reserve counters are updated with the hugetlb_lock held. It is safe
  * to reset the VMA at fork() time as it is not in use yet and there is no
  * chance of the global counters getting corrupted as a result of the values.
+ *
+ * The private mapping reservation is represented in a subtly different
+ * manner to a shared mapping.  A shared mapping has a region map associated
+ * with the underlying file; this region map represents the backing file
+ * pages which have ever had a reservation assigned, and it persists even
+ * after the page is instantiated.  A private mapping has a region map
+ * associated with the original mmap which is attached to all VMAs that
+ * reference it; this region map represents those offsets which have
+ * consumed a reservation, i.e. where pages have been instantiated.
  */
-static unsigned long vma_resv_huge_pages(struct vm_area_struct *vma)
+static unsigned long get_vma_private_data(struct vm_area_struct *vma)
+{
+       return (unsigned long)vma->vm_private_data;
+}
+
+static void set_vma_private_data(struct vm_area_struct *vma,
+                                                       unsigned long value)
+{
+       vma->vm_private_data = (void *)value;
+}
+
+struct resv_map {
+       struct kref refs;
+       struct list_head regions;
+};
+
+struct resv_map *resv_map_alloc(void)
+{
+       struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
+       if (!resv_map)
+               return NULL;
+
+       kref_init(&resv_map->refs);
+       INIT_LIST_HEAD(&resv_map->regions);
+
+       return resv_map;
+}
+
+void resv_map_release(struct kref *ref)
+{
+       struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
+
+       /* Clear out any active regions before we release the map. */
+       region_truncate(&resv_map->regions, 0);
+       kfree(resv_map);
+}
+
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_SHARED))
-               return (unsigned long)vma->vm_private_data & ~HPAGE_RESV_MASK;
+               return (struct resv_map *)(get_vma_private_data(vma) &
+                                                       ~HPAGE_RESV_MASK);
        return 0;
 }
 
-static void set_vma_resv_huge_pages(struct vm_area_struct *vma,
-                                                       unsigned long reserve)
+static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 {
-       unsigned long flags;
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_SHARED);
 
-       flags = (unsigned long)vma->vm_private_data & HPAGE_RESV_MASK;
-       vma->vm_private_data = (void *)(reserve | flags);
+       set_vma_private_data(vma, (get_vma_private_data(vma) &
+                               HPAGE_RESV_MASK) | (unsigned long)map);
 }
 
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
-       unsigned long reserveflags = (unsigned long)vma->vm_private_data;
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       vma->vm_private_data = (void *)(reserveflags | flags);
+       VM_BUG_ON(vma->vm_flags & VM_SHARED);
+
+       set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
 
 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
 {
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
-       return ((unsigned long)vma->vm_private_data & flag) != 0;
+
+       return (get_vma_private_data(vma) & flag) != 0;
 }
 
 /* Decrement the reserved pages in the hugepage pool by one */
-static void decrement_hugepage_resv_vma(struct vm_area_struct *vma)
+static void decrement_hugepage_resv_vma(struct hstate *h,
+                       struct vm_area_struct *vma)
 {
+       if (vma->vm_flags & VM_NORESERVE)
+               return;
+
        if (vma->vm_flags & VM_SHARED) {
                /* Shared mappings always use reserves */
-               resv_huge_pages--;
-       } else {
+               h->resv_huge_pages--;
+       } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Only the process that called mmap() has reserves for
                 * private mappings.
                 */
-               if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-                       unsigned long flags, reserve;
-                       resv_huge_pages--;
-                       flags = (unsigned long)vma->vm_private_data &
-                                                       HPAGE_RESV_MASK;
-                       reserve = (unsigned long)vma->vm_private_data - 1;
-                       vma->vm_private_data = (void *)(reserve | flags);
-               }
+               h->resv_huge_pages--;
        }
 }
 
@@ -116,21 +342,22 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 }
 
 /* Returns true if the VMA has associated reserve pages */
-static int vma_has_private_reserves(struct vm_area_struct *vma)
+static int vma_has_reserves(struct vm_area_struct *vma)
 {
        if (vma->vm_flags & VM_SHARED)
-               return 0;
-       if (!vma_resv_huge_pages(vma))
-               return 0;
-       return 1;
+               return 1;
+       if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+               return 1;
+       return 0;
 }
 
-static void clear_huge_page(struct page *page, unsigned long addr)
+static void clear_huge_page(struct page *page,
+                       unsigned long addr, unsigned long sz)
 {
        int i;
 
        might_sleep();
-       for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
+       for (i = 0; i < sz/PAGE_SIZE; i++) {
                cond_resched();
                clear_user_highpage(page + i, addr + i * PAGE_SIZE);
        }
@@ -140,41 +367,43 @@ static void copy_huge_page(struct page *dst, struct page *src,
                           unsigned long addr, struct vm_area_struct *vma)
 {
        int i;
+       struct hstate *h = hstate_vma(vma);
 
        might_sleep();
-       for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
+       for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
                copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
        }
 }
 
-static void enqueue_huge_page(struct page *page)
+static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
        int nid = page_to_nid(page);
-       list_add(&page->lru, &hugepage_freelists[nid]);
-       free_huge_pages++;
-       free_huge_pages_node[nid]++;
+       list_add(&page->lru, &h->hugepage_freelists[nid]);
+       h->free_huge_pages++;
+       h->free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(void)
+static struct page *dequeue_huge_page(struct hstate *h)
 {
        int nid;
        struct page *page = NULL;
 
        for (nid = 0; nid < MAX_NUMNODES; ++nid) {
-               if (!list_empty(&hugepage_freelists[nid])) {
-                       page = list_entry(hugepage_freelists[nid].next,
+               if (!list_empty(&h->hugepage_freelists[nid])) {
+                       page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
-                       free_huge_pages--;
-                       free_huge_pages_node[nid]--;
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[nid]--;
                        break;
                }
        }
        return page;
 }
 
-static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
+static struct page *dequeue_huge_page_vma(struct hstate *h,
+                               struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
 {
        int nid;
@@ -191,27 +420,27 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
         * have no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed
         */
-       if (!vma_has_private_reserves(vma) &&
-                       free_huge_pages - resv_huge_pages == 0)
+       if (!vma_has_reserves(vma) &&
+                       h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;
 
        /* If reserves cannot be used, ensure enough pages are in the pool */
-       if (avoid_reserve && free_huge_pages - resv_huge_pages == 0)
+       if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                return NULL;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                nid = zone_to_nid(zone);
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
-                   !list_empty(&hugepage_freelists[nid])) {
-                       page = list_entry(hugepage_freelists[nid].next,
+                   !list_empty(&h->hugepage_freelists[nid])) {
+                       page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
-                       free_huge_pages--;
-                       free_huge_pages_node[nid]--;
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[nid]--;
 
                        if (!avoid_reserve)
-                               decrement_hugepage_resv_vma(vma);
+                               decrement_hugepage_resv_vma(h, vma);
 
                        break;
                }
@@ -220,12 +449,13 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
        return page;
 }
 
-static void update_and_free_page(struct page *page)
+static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
-       nr_huge_pages--;
-       nr_huge_pages_node[page_to_nid(page)]--;
-       for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
+
+       h->nr_huge_pages--;
+       h->nr_huge_pages_node[page_to_nid(page)]--;
+       for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                                1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1<< PG_writeback);
@@ -233,11 +463,27 @@ static void update_and_free_page(struct page *page)
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
-       __free_pages(page, HUGETLB_PAGE_ORDER);
+       __free_pages(page, huge_page_order(h));
+}
+
+struct hstate *size_to_hstate(unsigned long size)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               if (huge_page_size(h) == size)
+                       return h;
+       }
+       return NULL;
 }
 
 static void free_huge_page(struct page *page)
 {
+       /*
+        * Can't pass hstate in here because it is called from the
+        * compound page destructor.
+        */
+       struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct address_space *mapping;
 
@@ -247,12 +493,12 @@ static void free_huge_page(struct page *page)
        INIT_LIST_HEAD(&page->lru);
 
        spin_lock(&hugetlb_lock);
-       if (surplus_huge_pages_node[nid]) {
-               update_and_free_page(page);
-               surplus_huge_pages--;
-               surplus_huge_pages_node[nid]--;
+       if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+               update_and_free_page(h, page);
+               h->surplus_huge_pages--;
+               h->surplus_huge_pages_node[nid]--;
        } else {
-               enqueue_huge_page(page);
+               enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
        if (mapping)
@@ -264,7 +510,7 @@ static void free_huge_page(struct page *page)
  * balanced by operating on them in a round-robin fashion.
  * Returns 1 if an adjustment was made.
  */
-static int adjust_pool_surplus(int delta)
+static int adjust_pool_surplus(struct hstate *h, int delta)
 {
        static int prev_nid;
        int nid = prev_nid;
@@ -277,15 +523,15 @@ static int adjust_pool_surplus(int delta)
                        nid = first_node(node_online_map);
 
                /* To shrink on this node, there must be a surplus page */
-               if (delta < 0 && !surplus_huge_pages_node[nid])
+               if (delta < 0 && !h->surplus_huge_pages_node[nid])
                        continue;
                /* Surplus cannot exceed the total number of pages */
-               if (delta > 0 && surplus_huge_pages_node[nid] >=
-                                               nr_huge_pages_node[nid])
+               if (delta > 0 && h->surplus_huge_pages_node[nid] >=
+                                               h->nr_huge_pages_node[nid])
                        continue;
 
-               surplus_huge_pages += delta;
-               surplus_huge_pages_node[nid] += delta;
+               h->surplus_huge_pages += delta;
+               h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (nid != prev_nid);
@@ -294,59 +540,74 @@ static int adjust_pool_surplus(int delta)
        return ret;
 }
 
-static struct page *alloc_fresh_huge_page_node(int nid)
+static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+{
+       set_compound_page_dtor(page, free_huge_page);
+       spin_lock(&hugetlb_lock);
+       h->nr_huge_pages++;
+       h->nr_huge_pages_node[nid]++;
+       spin_unlock(&hugetlb_lock);
+       put_page(page); /* free it into the hugepage allocator */
+}
+
+static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
 
+       if (h->order >= MAX_ORDER)
+               return NULL;
+
        page = alloc_pages_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
-               HUGETLB_PAGE_ORDER);
+               huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
-                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       __free_pages(page, huge_page_order(h));
                        return NULL;
                }
-               set_compound_page_dtor(page, free_huge_page);
-               spin_lock(&hugetlb_lock);
-               nr_huge_pages++;
-               nr_huge_pages_node[nid]++;
-               spin_unlock(&hugetlb_lock);
-               put_page(page); /* free it into the hugepage allocator */
+               prep_new_huge_page(h, page, nid);
        }
 
        return page;
 }
 
-static int alloc_fresh_huge_page(void)
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do.  Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+       int next_nid;
+       next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+       if (next_nid == MAX_NUMNODES)
+               next_nid = first_node(node_online_map);
+       h->hugetlb_next_nid = next_nid;
+       return next_nid;
+}
+
+static int alloc_fresh_huge_page(struct hstate *h)
 {
        struct page *page;
        int start_nid;
        int next_nid;
        int ret = 0;
 
-       start_nid = hugetlb_next_nid;
+       start_nid = h->hugetlb_next_nid;
 
        do {
-               page = alloc_fresh_huge_page_node(hugetlb_next_nid);
+               page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
                if (page)
                        ret = 1;
-               /*
-                * Use a helper variable to find the next node and then
-                * copy it back to hugetlb_next_nid afterwards:
-                * otherwise there's a window in which a racer might
-                * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-                * But we don't need to use a spin_lock here: it really
-                * doesn't matter if occasionally a racer chooses the
-                * same nid as we do.  Move nid forward in the mask even
-                * if we just successfully allocated a hugepage so that
-                * the next caller gets hugepages on the next node.
-                */
-               next_nid = next_node(hugetlb_next_nid, node_online_map);
-               if (next_nid == MAX_NUMNODES)
-                       next_nid = first_node(node_online_map);
-               hugetlb_next_nid = next_nid;
-       } while (!page && hugetlb_next_nid != start_nid);
+               next_nid = hstate_next_node(h);
+       } while (!page && h->hugetlb_next_nid != start_nid);
 
        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
@@ -356,12 +617,15 @@ static int alloc_fresh_huge_page(void)
        return ret;
 }
 
-static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
-                                               unsigned long address)
+static struct page *alloc_buddy_huge_page(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long address)
 {
        struct page *page;
        unsigned int nid;
 
+       if (h->order >= MAX_ORDER)
+               return NULL;
+
        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
@@ -386,18 +650,18 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
-       if (surplus_huge_pages >= nr_overcommit_huge_pages) {
+       if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
-               nr_huge_pages++;
-               surplus_huge_pages++;
+               h->nr_huge_pages++;
+               h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);
 
        page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
                                        __GFP_REPEAT|__GFP_NOWARN,
-                                       HUGETLB_PAGE_ORDER);
+                                       huge_page_order(h));
 
        spin_lock(&hugetlb_lock);
        if (page) {
@@ -412,12 +676,12 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                /*
                 * We incremented the global counters already
                 */
-               nr_huge_pages_node[nid]++;
-               surplus_huge_pages_node[nid]++;
+               h->nr_huge_pages_node[nid]++;
+               h->surplus_huge_pages_node[nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
-               nr_huge_pages--;
-               surplus_huge_pages--;
+               h->nr_huge_pages--;
+               h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);
@@ -429,16 +693,16 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
  * Increase the hugetlb pool such that it can accommodate a reservation
  * of size 'delta'.
  */
-static int gather_surplus_pages(int delta)
+static int gather_surplus_pages(struct hstate *h, int delta)
 {
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;
 
-       needed = (resv_huge_pages + delta) - free_huge_pages;
+       needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
-               resv_huge_pages += delta;
+               h->resv_huge_pages += delta;
                return 0;
        }
 
@@ -449,7 +713,7 @@ static int gather_surplus_pages(int delta)
 retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
-               page = alloc_buddy_huge_page(NULL, 0);
+               page = alloc_buddy_huge_page(h, NULL, 0);
                if (!page) {
                        /*
                         * We were not able to allocate enough pages to
@@ -470,7 +734,8 @@ retry:
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
-       needed = (resv_huge_pages + delta) - (free_huge_pages + allocated);
+       needed = (h->resv_huge_pages + delta) -
+                       (h->free_huge_pages + allocated);
        if (needed > 0)
                goto retry;
 
@@ -483,7 +748,7 @@ retry:
         * before they are reserved.
         */
        needed += allocated;
-       resv_huge_pages += delta;
+       h->resv_huge_pages += delta;
        ret = 0;
 free:
        /* Free the needed pages to the hugetlb pool */
@@ -491,7 +756,7 @@ free:
                if ((--needed) < 0)
                        break;
                list_del(&page->lru);
-               enqueue_huge_page(page);
+               enqueue_huge_page(h, page);
        }
 
        /* Free unnecessary surplus pages to the buddy allocator */
@@ -519,7 +784,8 @@ free:
  * allocated to satisfy the reservation must be explicitly freed if they were
  * never used.
  */
-static void return_unused_surplus_pages(unsigned long unused_resv_pages)
+static void return_unused_surplus_pages(struct hstate *h,
+                                       unsigned long unused_resv_pages)
 {
        static int nid = -1;
        struct page *page;
@@ -534,59 +800,119 @@ static void return_unused_surplus_pages(unsigned long unused_resv_pages)
        unsigned long remaining_iterations = num_online_nodes();
 
        /* Uncommit the reservation */
-       resv_huge_pages -= unused_resv_pages;
+       h->resv_huge_pages -= unused_resv_pages;
+
+       /* Cannot return gigantic pages currently */
+       if (h->order >= MAX_ORDER)
+               return;
 
-       nr_pages = min(unused_resv_pages, surplus_huge_pages);
+       nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
        while (remaining_iterations-- && nr_pages) {
                nid = next_node(nid, node_online_map);
                if (nid == MAX_NUMNODES)
                        nid = first_node(node_online_map);
 
-               if (!surplus_huge_pages_node[nid])
+               if (!h->surplus_huge_pages_node[nid])
                        continue;
 
-               if (!list_empty(&hugepage_freelists[nid])) {
-                       page = list_entry(hugepage_freelists[nid].next,
+               if (!list_empty(&h->hugepage_freelists[nid])) {
+                       page = list_entry(h->hugepage_freelists[nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
-                       update_and_free_page(page);
-                       free_huge_pages--;
-                       free_huge_pages_node[nid]--;
-                       surplus_huge_pages--;
-                       surplus_huge_pages_node[nid]--;
+                       update_and_free_page(h, page);
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[nid]--;
+                       h->surplus_huge_pages--;
+                       h->surplus_huge_pages_node[nid]--;
                        nr_pages--;
                        remaining_iterations = num_online_nodes();
                }
        }
 }
 
+/*
+ * Determine if the huge page at addr within the vma has an associated
+ * reservation.  Where it does not we will need to logically increase the
+ * reservation and actually increase the quota before an allocation can
+ * occur.  Where any new reservation would be required the reservation
+ * change is prepared, but not committed.  Once the page has been quota'd,
+ * allocated and instantiated, the change should be committed via
+ * vma_commit_reservation().  No action is required on failure.
+ */
+static int vma_needs_reservation(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long addr)
+{
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct inode *inode = mapping->host;
+
+       if (vma->vm_flags & VM_SHARED) {
+               pgoff_t idx = vma_hugecache_offset(h, vma, addr);
+               return region_chg(&inode->i_mapping->private_list,
+                                                       idx, idx + 1);
+
+       } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               return 1;
+
+       } else  {
+               int err;
+               pgoff_t idx = vma_hugecache_offset(h, vma, addr);
+               struct resv_map *reservations = vma_resv_map(vma);
+
+               err = region_chg(&reservations->regions, idx, idx + 1);
+               if (err < 0)
+                       return err;
+               return 0;
+       }
+}
+
+static void vma_commit_reservation(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long addr)
+{
+       struct address_space *mapping = vma->vm_file->f_mapping;
+       struct inode *inode = mapping->host;
+
+       if (vma->vm_flags & VM_SHARED) {
+               pgoff_t idx = vma_hugecache_offset(h, vma, addr);
+               region_add(&inode->i_mapping->private_list, idx, idx + 1);
+
+       } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+               pgoff_t idx = vma_hugecache_offset(h, vma, addr);
+               struct resv_map *reservations = vma_resv_map(vma);
+
+               /* Mark this page used in the map. */
+               region_add(&reservations->regions, idx, idx + 1);
+       }
+}
+
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
 {
+       struct hstate *h = hstate_vma(vma);
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned int chg = 0;
+       long chg;
 
        /*
         * Processes that did not create the mapping will have no reserves and
         * will not have accounted against quota. Check that the quota can be
         * made before satisfying the allocation
+        * MAP_NORESERVE mappings may also need pages and quota allocated
+        * if no reserve mapping overlaps.
         */
-       if (!(vma->vm_flags & VM_SHARED) &&
-                       !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
-               chg = 1;
+       chg = vma_needs_reservation(h, vma, addr);
+       if (chg < 0)
+               return ERR_PTR(chg);
+       if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
                        return ERR_PTR(-ENOSPC);
-       }
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page_vma(vma, addr, avoid_reserve);
+       page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
        spin_unlock(&hugetlb_lock);
 
        if (!page) {
-               page = alloc_buddy_huge_page(vma, addr);
+               page = alloc_buddy_huge_page(h, vma, addr);
                if (!page) {
                        hugetlb_put_quota(inode->i_mapping, chg);
                        return ERR_PTR(-VM_FAULT_OOM);
@@ -596,38 +922,458 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        set_page_refcounted(page);
        set_page_private(page, (unsigned long) mapping);
 
+       vma_commit_reservation(h, vma, addr);
+
        return page;
 }
 
-static int __init hugetlb_init(void)
+__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
+{
+       struct huge_bootmem_page *m;
+       int nr_nodes = nodes_weight(node_online_map);
+
+       while (nr_nodes) {
+               void *addr;
+
+               addr = __alloc_bootmem_node_nopanic(
+                               NODE_DATA(h->hugetlb_next_nid),
+                               huge_page_size(h), huge_page_size(h), 0);
+
+               if (addr) {
+                       /*
+                        * Use the beginning of the huge page to store the
+                        * huge_bootmem_page struct (until gather_bootmem
+                        * puts them into the mem_map).
+                        */
+                       m = addr;
+                       if (m)
+                               goto found;
+               }
+               hstate_next_node(h);
+               nr_nodes--;
+       }
+       return 0;
+
+found:
+       BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
+       /* Put them into a private list first because mem_map is not up yet */
+       list_add(&m->list, &huge_boot_pages);
+       m->hstate = h;
+       return 1;
+}
+
+/* Put bootmem huge pages into the standard lists after mem_map is up */
+static void __init gather_bootmem_prealloc(void)
+{
+       struct huge_bootmem_page *m;
+
+       list_for_each_entry(m, &huge_boot_pages, list) {
+               struct page *page = virt_to_page(m);
+               struct hstate *h = m->hstate;
+               __ClearPageReserved(page);
+               WARN_ON(page_count(page) != 1);
+               prep_compound_page(page, h->order);
+               prep_new_huge_page(h, page, page_to_nid(page));
+       }
+}
+
+static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
+{
+       unsigned long i;
+
+       for (i = 0; i < h->max_huge_pages; ++i) {
+               if (h->order >= MAX_ORDER) {
+                       if (!alloc_bootmem_huge_page(h))
+                               break;
+               } else if (!alloc_fresh_huge_page(h))
+                       break;
+       }
+       h->max_huge_pages = i;
+}
+
+static void __init hugetlb_init_hstates(void)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               /* oversize hugepages were init'ed in early boot */
+               if (h->order < MAX_ORDER)
+                       hugetlb_hstate_alloc_pages(h);
+       }
+}
+
+static char * __init memfmt(char *buf, unsigned long n)
+{
+       if (n >= (1UL << 30))
+               sprintf(buf, "%lu GB", n >> 30);
+       else if (n >= (1UL << 20))
+               sprintf(buf, "%lu MB", n >> 20);
+       else
+               sprintf(buf, "%lu KB", n >> 10);
+       return buf;
+}
+
+static void __init report_hugepages(void)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               char buf[32];
+               printk(KERN_INFO "HugeTLB registered %s page size, "
+                                "pre-allocated %ld pages\n",
+                       memfmt(buf, huge_page_size(h)),
+                       h->free_huge_pages);
+       }
+}
+
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_HIGHMEM
+static void try_to_free_low(struct hstate *h, unsigned long count)
+{
+       int i;
+
+       if (h->order >= MAX_ORDER)
+               return;
+
+       for (i = 0; i < MAX_NUMNODES; ++i) {
+               struct page *page, *next;
+               struct list_head *freel = &h->hugepage_freelists[i];
+               list_for_each_entry_safe(page, next, freel, lru) {
+                       if (count >= h->nr_huge_pages)
+                               return;
+                       if (PageHighMem(page))
+                               continue;
+                       list_del(&page->lru);
+                       update_and_free_page(h, page);
+                       h->free_huge_pages--;
+                       h->free_huge_pages_node[page_to_nid(page)]--;
+               }
+       }
+}
+#else
+static inline void try_to_free_low(struct hstate *h, unsigned long count)
+{
+}
+#endif
+
+#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
+static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
+{
+       unsigned long min_count, ret;
+
+       if (h->order >= MAX_ORDER)
+               return h->max_huge_pages;
+
+       /*
+        * Increase the pool size
+        * First take pages out of surplus state.  Then make up the
+        * remaining difference by allocating fresh huge pages.
+        *
+        * We might race with alloc_buddy_huge_page() here and be unable
+        * to convert a surplus huge page to a normal huge page. That is
+        * not critical, though, it just means the overall size of the
+        * pool might be one hugepage larger than it needs to be, but
+        * within all the constraints specified by the sysctls.
+        */
+       spin_lock(&hugetlb_lock);
+       while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
+               if (!adjust_pool_surplus(h, -1))
+                       break;
+       }
+
+       while (count > persistent_huge_pages(h)) {
+               /*
+                * If this allocation races such that we no longer need the
+                * page, free_huge_page will handle it by freeing the page
+                * and reducing the surplus.
+                */
+               spin_unlock(&hugetlb_lock);
+               ret = alloc_fresh_huge_page(h);
+               spin_lock(&hugetlb_lock);
+               if (!ret)
+                       goto out;
+
+       }
+
+       /*
+        * Decrease the pool size
+        * First return free pages to the buddy allocator (being careful
+        * to keep enough around to satisfy reservations).  Then place
+        * pages into surplus state as needed so the pool will shrink
+        * to the desired size as pages become free.
+        *
+        * By placing pages into the surplus state independent of the
+        * overcommit value, we are allowing the surplus pool size to
+        * exceed overcommit. There are few sane options here. Since
+        * alloc_buddy_huge_page() is checking the global counter,
+        * though, we'll note that we're not allowed to exceed surplus
+        * and won't grow the pool anywhere else. Not until one of the
+        * sysctls are changed, or the surplus pages go out of use.
+        */
+       min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
+       min_count = max(count, min_count);
+       try_to_free_low(h, min_count);
+       while (min_count < persistent_huge_pages(h)) {
+               struct page *page = dequeue_huge_page(h);
+               if (!page)
+                       break;
+               update_and_free_page(h, page);
+       }
+       while (count < persistent_huge_pages(h)) {
+               if (!adjust_pool_surplus(h, 1))
+                       break;
+       }
+out:
+       ret = persistent_huge_pages(h);
+       spin_unlock(&hugetlb_lock);
+       return ret;
+}
+
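
A worked illustration with made-up numbers: with nr_huge_pages == 10, free_huge_pages == 4 and resv_huge_pages == 2, six pages are in use, so min_count = 2 + 10 - 4 = 8.  A request to shrink the pool to count == 2 can therefore free only two pages immediately; the remaining six above the target are converted to surplus and are returned to the buddy allocator as they become free.
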
+#define HSTATE_ATTR_RO(_name) \
+       static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define HSTATE_ATTR(_name) \
+       static struct kobj_attribute _name##_attr = \
+               __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static struct kobject *hugepages_kobj;
+static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
+
+static struct hstate *kobj_to_hstate(struct kobject *kobj)
+{
+       int i;
+       for (i = 0; i < HUGE_MAX_HSTATE; i++)
+               if (hstate_kobjs[i] == kobj)
+                       return &hstates[i];
+       BUG();
+       return NULL;
+}
+
+static ssize_t nr_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->nr_huge_pages);
+}
+static ssize_t nr_hugepages_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int err;
+       unsigned long input;
+       struct hstate *h = kobj_to_hstate(kobj);
+
+       err = strict_strtoul(buf, 10, &input);
+       if (err)
+               return err;
+
+       h->max_huge_pages = set_max_huge_pages(h, input);
+
+       return count;
+}
+HSTATE_ATTR(nr_hugepages);
+
+static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
+}
+static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int err;
+       unsigned long input;
+       struct hstate *h = kobj_to_hstate(kobj);
+
+       err = strict_strtoul(buf, 10, &input);
+       if (err)
+               return err;
+
+       spin_lock(&hugetlb_lock);
+       h->nr_overcommit_huge_pages = input;
+       spin_unlock(&hugetlb_lock);
+
+       return count;
+}
+HSTATE_ATTR(nr_overcommit_hugepages);
+
+static ssize_t free_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->free_huge_pages);
+}
+HSTATE_ATTR_RO(free_hugepages);
+
+static ssize_t resv_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->resv_huge_pages);
+}
+HSTATE_ATTR_RO(resv_hugepages);
+
+static ssize_t surplus_hugepages_show(struct kobject *kobj,
+                                       struct kobj_attribute *attr, char *buf)
+{
+       struct hstate *h = kobj_to_hstate(kobj);
+       return sprintf(buf, "%lu\n", h->surplus_huge_pages);
+}
+HSTATE_ATTR_RO(surplus_hugepages);
+
+static struct attribute *hstate_attrs[] = {
+       &nr_hugepages_attr.attr,
+       &nr_overcommit_hugepages_attr.attr,
+       &free_hugepages_attr.attr,
+       &resv_hugepages_attr.attr,
+       &surplus_hugepages_attr.attr,
+       NULL,
+};
+
+static struct attribute_group hstate_attr_group = {
+       .attrs = hstate_attrs,
+};
+
+static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
+{
+       int retval;
+
+       hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
+                                                       hugepages_kobj);
+       if (!hstate_kobjs[h - hstates])
+               return -ENOMEM;
+
+       retval = sysfs_create_group(hstate_kobjs[h - hstates],
+                                                       &hstate_attr_group);
+       if (retval)
+               kobject_put(hstate_kobjs[h - hstates]);
+
+       return retval;
+}
+
+static void __init hugetlb_sysfs_init(void)
+{
+       struct hstate *h;
+       int err;
+
+       hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
+       if (!hugepages_kobj)
+               return;
+
+       for_each_hstate(h) {
+               err = hugetlb_sysfs_add_hstate(h);
+               if (err)
+                       printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
+                                                               h->name);
+       }
+}
+
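
With the naming above (mm_kobj is the /sys/kernel/mm kobject and h->name is "hugepages-<size>kB"), each hstate is exposed under /sys/kernel/mm/hugepages/; for a 2MB hstate, for instance, the group provides:

	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
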
+static void __exit hugetlb_exit(void)
+{
+       struct hstate *h;
+
+       for_each_hstate(h) {
+               kobject_put(hstate_kobjs[h - hstates]);
+       }
+
+       kobject_put(hugepages_kobj);
+}
+module_exit(hugetlb_exit);
+
+static int __init hugetlb_init(void)
+{
+       BUILD_BUG_ON(HPAGE_SHIFT == 0);
+
+       if (!size_to_hstate(default_hstate_size)) {
+               default_hstate_size = HPAGE_SIZE;
+               if (!size_to_hstate(default_hstate_size))
+                       hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
+       }
+       default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
+       if (default_hstate_max_huge_pages)
+               default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+
+       hugetlb_init_hstates();
+
+       gather_bootmem_prealloc();
+
+       report_hugepages();
+
+       hugetlb_sysfs_init();
+
+       return 0;
+}
+module_init(hugetlb_init);
+
+/* Should be called on processing a hugepagesz=... option */
+void __init hugetlb_add_hstate(unsigned order)
 {
+       struct hstate *h;
        unsigned long i;
 
-       if (HPAGE_SHIFT == 0)
-               return 0;
-
+       if (size_to_hstate(PAGE_SIZE << order)) {
+               printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
+               return;
+       }
+       BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
+       BUG_ON(order == 0);
+       h = &hstates[max_hstate++];
+       h->order = order;
+       h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+       h->nr_huge_pages = 0;
+       h->free_huge_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i)
-               INIT_LIST_HEAD(&hugepage_freelists[i]);
+               INIT_LIST_HEAD(&h->hugepage_freelists[i]);
+       h->hugetlb_next_nid = first_node(node_online_map);
+       snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
+                                       huge_page_size(h)/1024);
 
-       hugetlb_next_nid = first_node(node_online_map);
+       parsed_hstate = h;
+}
 
-       for (i = 0; i < max_huge_pages; ++i) {
-               if (!alloc_fresh_huge_page())
-                       break;
+static int __init hugetlb_nrpages_setup(char *s)
+{
+       unsigned long *mhp;
+       static unsigned long *last_mhp;
+
+       /*
+        * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
+        * so this hugepages= parameter goes to the "default hstate".
+        */
+       if (!max_hstate)
+               mhp = &default_hstate_max_huge_pages;
+       else
+               mhp = &parsed_hstate->max_huge_pages;
+
+       if (mhp == last_mhp) {
+               printk(KERN_WARNING "hugepages= specified twice without "
+                       "interleaving hugepagesz=, ignoring\n");
+               return 1;
        }
-       max_huge_pages = free_huge_pages = nr_huge_pages = i;
-       printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
-       return 0;
+
+       if (sscanf(s, "%lu", mhp) <= 0)
+               *mhp = 0;
+
+       /*
+        * Global state is always initialized later in hugetlb_init.
+        * But the pages for hstates of order >= MAX_ORDER must be
+        * allocated here, early, while the bootmem allocator is still
+        * usable.
+        */
+       if (max_hstate && parsed_hstate->order >= MAX_ORDER)
+               hugetlb_hstate_alloc_pages(parsed_hstate);
+
+       last_mhp = mhp;
+
+       return 1;
 }
-module_init(hugetlb_init);
+__setup("hugepages=", hugetlb_nrpages_setup);
 
-static int __init hugetlb_setup(char *s)
+static int __init hugetlb_default_setup(char *s)
 {
-       if (sscanf(s, "%lu", &max_huge_pages) <= 0)
-               max_huge_pages = 0;
+       default_hstate_size = memparse(s, &s);
        return 1;
 }
-__setup("hugepages=", hugetlb_setup);
+__setup("default_hugepagesz=", hugetlb_default_setup);
 
 static unsigned int cpuset_mems_nr(unsigned int *array)
 {
@@ -640,108 +1386,23 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
        return nr;
 }
 
-#ifdef CONFIG_SYSCTL
-#ifdef CONFIG_HIGHMEM
-static void try_to_free_low(unsigned long count)
-{
-       int i;
-
-       for (i = 0; i < MAX_NUMNODES; ++i) {
-               struct page *page, *next;
-               list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
-                       if (count >= nr_huge_pages)
-                               return;
-                       if (PageHighMem(page))
-                               continue;
-                       list_del(&page->lru);
-                       update_and_free_page(page);
-                       free_huge_pages--;
-                       free_huge_pages_node[page_to_nid(page)]--;
-               }
-       }
-}
-#else
-static inline void try_to_free_low(unsigned long count)
-{
-}
-#endif
-
-#define persistent_huge_pages (nr_huge_pages - surplus_huge_pages)
-static unsigned long set_max_huge_pages(unsigned long count)
-{
-       unsigned long min_count, ret;
-
-       /*
-        * Increase the pool size
-        * First take pages out of surplus state.  Then make up the
-        * remaining difference by allocating fresh huge pages.
-        *
-        * We might race with alloc_buddy_huge_page() here and be unable
-        * to convert a surplus huge page to a normal huge page. That is
-        * not critical, though, it just means the overall size of the
-        * pool might be one hugepage larger than it needs to be, but
-        * within all the constraints specified by the sysctls.
-        */
-       spin_lock(&hugetlb_lock);
-       while (surplus_huge_pages && count > persistent_huge_pages) {
-               if (!adjust_pool_surplus(-1))
-                       break;
-       }
-
-       while (count > persistent_huge_pages) {
-               /*
-                * If this allocation races such that we no longer need the
-                * page, free_huge_page will handle it by freeing the page
-                * and reducing the surplus.
-                */
-               spin_unlock(&hugetlb_lock);
-               ret = alloc_fresh_huge_page();
-               spin_lock(&hugetlb_lock);
-               if (!ret)
-                       goto out;
-
-       }
-
-       /*
-        * Decrease the pool size
-        * First return free pages to the buddy allocator (being careful
-        * to keep enough around to satisfy reservations).  Then place
-        * pages into surplus state as needed so the pool will shrink
-        * to the desired size as pages become free.
-        *
-        * By placing pages into the surplus state independent of the
-        * overcommit value, we are allowing the surplus pool size to
-        * exceed overcommit. There are few sane options here. Since
-        * alloc_buddy_huge_page() is checking the global counter,
-        * though, we'll note that we're not allowed to exceed surplus
-        * and won't grow the pool anywhere else. Not until one of the
-        * sysctls are changed, or the surplus pages go out of use.
-        */
-       min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
-       min_count = max(count, min_count);
-       try_to_free_low(min_count);
-       while (min_count < persistent_huge_pages) {
-               struct page *page = dequeue_huge_page();
-               if (!page)
-                       break;
-               update_and_free_page(page);
-       }
-       while (count < persistent_huge_pages) {
-               if (!adjust_pool_surplus(1))
-                       break;
-       }
-out:
-       ret = persistent_huge_pages;
-       spin_unlock(&hugetlb_lock);
-       return ret;
-}
-
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
                           struct file *file, void __user *buffer,
                           size_t *length, loff_t *ppos)
 {
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+
+       if (!write)
+               tmp = h->max_huge_pages;
+
+       table->data = &tmp;
+       table->maxlen = sizeof(unsigned long);
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
-       max_huge_pages = set_max_huge_pages(max_huge_pages);
+
+       if (write)
+               h->max_huge_pages = set_max_huge_pages(h, tmp);
+
        return 0;
 }
 
@@ -761,10 +1422,22 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
                        struct file *file, void __user *buffer,
                        size_t *length, loff_t *ppos)
 {
+       struct hstate *h = &default_hstate;
+       unsigned long tmp;
+
+       if (!write)
+               tmp = h->nr_overcommit_huge_pages;
+
+       table->data = &tmp;
+       table->maxlen = sizeof(unsigned long);
        proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
-       spin_lock(&hugetlb_lock);
-       nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
-       spin_unlock(&hugetlb_lock);
+
+       if (write) {
+               spin_lock(&hugetlb_lock);
+               h->nr_overcommit_huge_pages = tmp;
+               spin_unlock(&hugetlb_lock);
+       }
+
        return 0;
 }
 
@@ -772,37 +1445,40 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 
 int hugetlb_report_meminfo(char *buf)
 {
+       struct hstate *h = &default_hstate;
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "HugePages_Rsvd:  %5lu\n"
                        "HugePages_Surp:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
-                       nr_huge_pages,
-                       free_huge_pages,
-                       resv_huge_pages,
-                       surplus_huge_pages,
-                       HPAGE_SIZE/1024);
+                       h->nr_huge_pages,
+                       h->free_huge_pages,
+                       h->resv_huge_pages,
+                       h->surplus_huge_pages,
+                       1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
 }
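
The Hugepagesize line is now derived from the hstate's order instead of the fixed HPAGE_SIZE constant: 1UL << (huge_page_order(h) + PAGE_SHIFT - 10) is the huge page size in kB. A quick user-space check of that arithmetic, assuming 4 KB base pages (PAGE_SHIFT == 12); hugepage_kb() is illustrative only:

        #include <stdio.h>

        #define PAGE_SHIFT 12

        static unsigned long hugepage_kb(unsigned int order)
        {
                /* bytes = 1 << (order + PAGE_SHIFT); kB = bytes >> 10 */
                return 1UL << (order + PAGE_SHIFT - 10);
        }

        int main(void)
        {
                printf("order 9:  %lu kB\n", hugepage_kb(9));   /* 2048 kB   (2 MB) */
                printf("order 18: %lu kB\n", hugepage_kb(18));  /* 1048576 kB (1 GB) */
                return 0;
        }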
 
 int hugetlb_report_node_meminfo(int nid, char *buf)
 {
+       struct hstate *h = &default_hstate;
        return sprintf(buf,
                "Node %d HugePages_Total: %5u\n"
                "Node %d HugePages_Free:  %5u\n"
                "Node %d HugePages_Surp:  %5u\n",
-               nid, nr_huge_pages_node[nid],
-               nid, free_huge_pages_node[nid],
-               nid, surplus_huge_pages_node[nid]);
+               nid, h->nr_huge_pages_node[nid],
+               nid, h->free_huge_pages_node[nid],
+               nid, h->surplus_huge_pages_node[nid]);
 }
 
 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-       return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
+       struct hstate *h = &default_hstate;
+       return h->nr_huge_pages * pages_per_huge_page(h);
 }
 
-static int hugetlb_acct_memory(long delta)
+static int hugetlb_acct_memory(struct hstate *h, long delta)
 {
        int ret = -ENOMEM;
 
@@ -825,29 +1501,60 @@ static int hugetlb_acct_memory(long delta)
         * semantics that cpuset has.
         */
        if (delta > 0) {
-               if (gather_surplus_pages(delta) < 0)
+               if (gather_surplus_pages(h, delta) < 0)
                        goto out;
 
-               if (delta > cpuset_mems_nr(free_huge_pages_node)) {
-                       return_unused_surplus_pages(delta);
+               if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
+                       return_unused_surplus_pages(h, delta);
                        goto out;
                }
        }
 
        ret = 0;
        if (delta < 0)
-               return_unused_surplus_pages((unsigned long) -delta);
+               return_unused_surplus_pages(h, (unsigned long) -delta);
 
 out:
        spin_unlock(&hugetlb_lock);
        return ret;
 }
 
+static void hugetlb_vm_op_open(struct vm_area_struct *vma)
+{
+       struct resv_map *reservations = vma_resv_map(vma);
+
+       /*
+        * This new VMA should share its sibling's reservation map if present.
+        * The VMA will only ever have a valid reservation map pointer where
+        * it is being copied for another still existing VMA.  As that VMA
+        * has a reference to the reservation map it cannot disappear until
+        * after this open call completes.  It is therefore safe to take a
+        * new reference here without additional locking.
+        */
+       if (reservations)
+               kref_get(&reservations->refs);
+}
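
hugetlb_vm_op_open() pins the shared reservation map with kref_get(), and hugetlb_vm_op_close() below drops that reference, so the map lives exactly as long as the last VMA that refers to it. A user-space sketch of that lifetime; resv_map_get()/resv_map_put() are illustrative names standing in for the kref calls, and the plain counter is not atomic like the kernel's kref:

        #include <stdio.h>
        #include <stdlib.h>

        struct resv_map_sketch {
                int refs;               /* stands in for the embedded struct kref */
                /* region list omitted */
        };

        static struct resv_map_sketch *resv_map_alloc_sketch(void)
        {
                struct resv_map_sketch *map = calloc(1, sizeof(*map));
                if (map)
                        map->refs = 1;                  /* owner's reference */
                return map;
        }

        static void resv_map_get(struct resv_map_sketch *map)   /* vm_op ->open */
        {
                map->refs++;
        }

        static void resv_map_put(struct resv_map_sketch *map)   /* vm_op ->close */
        {
                if (--map->refs == 0) {
                        printf("last reference dropped, freeing map\n");
                        free(map);
                }
        }

        int main(void)
        {
                struct resv_map_sketch *map = resv_map_alloc_sketch();

                resv_map_get(map);      /* e.g. fork() copies the VMA */
                resv_map_put(map);      /* child unmaps */
                resv_map_put(map);      /* parent unmaps: frees the map */
                return 0;
        }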
+
 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
 {
-       unsigned long reserve = vma_resv_huge_pages(vma);
-       if (reserve)
-               hugetlb_acct_memory(-reserve);
+       struct hstate *h = hstate_vma(vma);
+       struct resv_map *reservations = vma_resv_map(vma);
+       unsigned long reserve;
+       unsigned long start;
+       unsigned long end;
+
+       if (reservations) {
+               start = vma_hugecache_offset(h, vma, vma->vm_start);
+               end = vma_hugecache_offset(h, vma, vma->vm_end);
+
+               reserve = (end - start) -
+                       region_count(&reservations->regions, start, end);
+
+               kref_put(&reservations->refs, resv_map_release);
+
+               if (reserve)
+                       hugetlb_acct_memory(h, -reserve);
+       }
 }
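
The close path charges back only the reservations that were never consumed: the huge pages spanned by the VMA minus the pages that region_count() finds already recorded in the map. A simplified user-space sketch of that computation, using a plain array of intervals and a hypothetical region_count_sketch() in place of the kernel's file_region list:

        #include <stdio.h>

        struct interval { long from, to; };     /* [from, to) in huge-page units */

        /* Count the huge pages in [start, end) covered by the recorded intervals. */
        static long region_count_sketch(const struct interval *r, int n, long start, long end)
        {
                long count = 0;

                for (int i = 0; i < n; i++) {
                        long lo = r[i].from > start ? r[i].from : start;
                        long hi = r[i].to < end ? r[i].to : end;
                        if (hi > lo)
                                count += hi - lo;
                }
                return count;
        }

        int main(void)
        {
                /* VMA covers huge-page offsets [0, 10); pages 2-4 and 7 were faulted in. */
                struct interval faulted[] = { { 2, 5 }, { 7, 8 } };
                long start = 0, end = 10;

                long reserve = (end - start) -
                        region_count_sketch(faulted, 2, start, end);
                printf("unconsumed reservations: %ld\n", reserve);   /* prints 6 */
                return 0;
        }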
 
 /*
@@ -864,6 +1571,7 @@ static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 struct vm_operations_struct hugetlb_vm_ops = {
        .fault = hugetlb_vm_op_fault,
+       .open = hugetlb_vm_op_open,
        .close = hugetlb_vm_op_close,
 };
 
@@ -903,14 +1611,16 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        struct page *ptepage;
        unsigned long addr;
        int cow;
+       struct hstate *h = hstate_vma(vma);
+       unsigned long sz = huge_page_size(h);
 
        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
-       for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+       for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
                src_pte = huge_pte_offset(src, addr);
                if (!src_pte)
                        continue;
-               dst_pte = huge_pte_alloc(dst, addr);
+               dst_pte = huge_pte_alloc(dst, addr, sz);
                if (!dst_pte)
                        goto nomem;
 
@@ -946,6 +1656,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        pte_t pte;
        struct page *page;
        struct page *tmp;
+       struct hstate *h = hstate_vma(vma);
+       unsigned long sz = huge_page_size(h);
+
        /*
         * A page gathering list, protected by per file i_mmap_lock. The
         * lock is used to avoid list corruption from multiple unmapping
@@ -954,11 +1667,11 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        LIST_HEAD(page_list);
 
        WARN_ON(!is_vm_hugetlb_page(vma));
-       BUG_ON(start & ~HPAGE_MASK);
-       BUG_ON(end & ~HPAGE_MASK);
+       BUG_ON(start & ~huge_page_mask(h));
+       BUG_ON(end & ~huge_page_mask(h));
 
        spin_lock(&mm->page_table_lock);
-       for (address = start; address < end; address += HPAGE_SIZE) {
+       for (address = start; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
@@ -1007,19 +1720,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                          unsigned long end, struct page *ref_page)
 {
-       /*
-        * It is undesirable to test vma->vm_file as it should be non-null
-        * for valid hugetlb area. However, vm_file will be NULL in the error
-        * cleanup path of do_mmap_pgoff. When hugetlbfs ->mmap method fails,
-        * do_mmap_pgoff() nullifies vma->vm_file before calling this function
-        * to clean up. Since no pte has actually been setup, it is safe to
-        * do nothing in this case.
-        */
-       if (vma->vm_file) {
-               spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
-               __unmap_hugepage_range(vma, start, end, ref_page);
-               spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
-       }
+       spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+       __unmap_hugepage_range(vma, start, end, ref_page);
+       spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
 }
 
 /*
@@ -1072,6 +1775,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte,
                        struct page *pagecache_page)
 {
+       struct hstate *h = hstate_vma(vma);
        struct page *old_page, *new_page;
        int avoidcopy;
        int outside_reserve = 0;
@@ -1132,7 +1836,7 @@ retry_avoidcopy:
        __SetPageUptodate(new_page);
        spin_lock(&mm->page_table_lock);
 
-       ptep = huge_pte_offset(mm, address & HPAGE_MASK);
+       ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
                huge_ptep_clear_flush(vma, address, ptep);
@@ -1147,15 +1851,14 @@ retry_avoidcopy:
 }
 
 /* Return the pagecache page at a given address within a VMA */
-static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
-                       unsigned long address)
+static struct page *hugetlbfs_pagecache_page(struct hstate *h,
+                       struct vm_area_struct *vma, unsigned long address)
 {
        struct address_space *mapping;
-       unsigned long idx;
+       pgoff_t idx;
 
        mapping = vma->vm_file->f_mapping;
-       idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-               + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+       idx = vma_hugecache_offset(h, vma, address);
 
        return find_lock_page(mapping, idx);
 }
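
vma_hugecache_offset() folds together the two-term index the removed lines spelled out: the huge-page offset of the address within the VMA plus the VMA's file offset expressed in huge pages. A user-space check of that arithmetic, assuming 4 KB base pages and 2 MB huge pages; hugecache_offset() is only an illustration of the formula:

        #include <stdio.h>

        #define PAGE_SHIFT       12
        #define HUGE_PAGE_SHIFT  21      /* 2 MB huge pages */

        static unsigned long hugecache_offset(unsigned long addr,
                                              unsigned long vm_start,
                                              unsigned long vm_pgoff)
        {
                return ((addr - vm_start) >> HUGE_PAGE_SHIFT) +
                       (vm_pgoff >> (HUGE_PAGE_SHIFT - PAGE_SHIFT));
        }

        int main(void)
        {
                /* Mapping starts 4 MB into the file (vm_pgoff = 1024 small pages);
                 * the faulting address lies 6 MB past vm_start. */
                unsigned long idx = hugecache_offset(0x40000000UL + 6 * 1024 * 1024,
                                                     0x40000000UL, 1024);

                printf("huge page index: %lu\n", idx);   /* 3 within the VMA + 2 from pgoff = 5 */
                return 0;
        }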
@@ -1163,8 +1866,9 @@ static struct page *hugetlbfs_pagecache_page(struct vm_area_struct *vma,
 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, int write_access)
 {
+       struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
-       unsigned long idx;
+       pgoff_t idx;
        unsigned long size;
        struct page *page;
        struct address_space *mapping;
@@ -1183,8 +1887,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        mapping = vma->vm_file->f_mapping;
-       idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
-               + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+       idx = vma_hugecache_offset(h, vma, address);
 
        /*
         * Use page lock to guard against racing truncation
@@ -1193,7 +1896,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 retry:
        page = find_lock_page(mapping, idx);
        if (!page) {
-               size = i_size_read(mapping->host) >> HPAGE_SHIFT;
+               size = i_size_read(mapping->host) >> huge_page_shift(h);
                if (idx >= size)
                        goto out;
                page = alloc_huge_page(vma, address, 0);
@@ -1201,7 +1904,7 @@ retry:
                        ret = -PTR_ERR(page);
                        goto out;
                }
-               clear_huge_page(page, address);
+               clear_huge_page(page, address, huge_page_size(h));
                __SetPageUptodate(page);
 
                if (vma->vm_flags & VM_SHARED) {
@@ -1217,14 +1920,14 @@ retry:
                        }
 
                        spin_lock(&inode->i_lock);
-                       inode->i_blocks += BLOCKS_PER_HUGEPAGE;
+                       inode->i_blocks += blocks_per_huge_page(h);
                        spin_unlock(&inode->i_lock);
                } else
                        lock_page(page);
        }
 
        spin_lock(&mm->page_table_lock);
-       size = i_size_read(mapping->host) >> HPAGE_SHIFT;
+       size = i_size_read(mapping->host) >> huge_page_shift(h);
        if (idx >= size)
                goto backout;
 
@@ -1260,8 +1963,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_t entry;
        int ret;
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
+       struct hstate *h = hstate_vma(vma);
 
-       ptep = huge_pte_alloc(mm, address);
+       ptep = huge_pte_alloc(mm, address, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;
 
@@ -1285,7 +1989,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (likely(pte_same(entry, huge_ptep_get(ptep))))
                if (write_access && !pte_write(entry)) {
                        struct page *page;
-                       page = hugetlbfs_pagecache_page(vma, address);
+                       page = hugetlbfs_pagecache_page(h, vma, address);
                        ret = hugetlb_cow(mm, vma, address, ptep, entry, page);
                        if (page) {
                                unlock_page(page);
@@ -1298,6 +2002,15 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        return ret;
 }
 
+/* Can be overridden by architectures */
+__attribute__((weak)) struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+              pud_t *pud, int write)
+{
+       BUG();
+       return NULL;
+}
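
follow_huge_pud() gets a weak default so that only architectures with pud-sized huge pages need to supply a real implementation; the generic copy should never run, hence the BUG(). The same linker behaviour can be seen in a small user-space program: the weak definition is used unless another object file provides a strong one with the same name (arch_hook() is just an illustrative name):

        #include <stdio.h>

        /* Weak default; a strong definition of arch_hook() in another .o would replace it. */
        __attribute__((weak)) int arch_hook(void)
        {
                printf("generic default\n");
                return 0;
        }

        int main(void)
        {
                return arch_hook();     /* prints "generic default" when built alone */
        }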
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct page **pages, struct vm_area_struct **vmas,
                        unsigned long *position, int *length, int i,
@@ -1306,6 +2019,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long pfn_offset;
        unsigned long vaddr = *position;
        int remainder = *length;
+       struct hstate *h = hstate_vma(vma);
 
        spin_lock(&mm->page_table_lock);
        while (vaddr < vma->vm_end && remainder) {
@@ -1317,7 +2031,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * each hugepage.  We have to make sure we get the
                 * first, for the page indexing below to work.
                 */
-               pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
+               pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
 
                if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
                    (write && !pte_write(huge_ptep_get(pte)))) {
@@ -1335,7 +2049,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        break;
                }
 
-               pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
+               pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
                page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
@@ -1351,7 +2065,7 @@ same_page:
                --remainder;
                ++i;
                if (vaddr < vma->vm_end && remainder &&
-                               pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
+                               pfn_offset < pages_per_huge_page(h)) {
                        /*
                         * We use pfn_offset to avoid touching the pageframes
                         * of this compound page.
@@ -1373,13 +2087,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        unsigned long start = address;
        pte_t *ptep;
        pte_t pte;
+       struct hstate *h = hstate_vma(vma);
 
        BUG_ON(address >= end);
        flush_cache_range(vma, address, end);
 
        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
        spin_lock(&mm->page_table_lock);
-       for (; address < end; address += HPAGE_SIZE) {
+       for (; address < end; address += huge_page_size(h)) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
@@ -1397,132 +2112,15 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
        flush_tlb_range(vma, start, end);
 }
 
-struct file_region {
-       struct list_head link;
-       long from;
-       long to;
-};
-
-static long region_add(struct list_head *head, long f, long t)
-{
-       struct file_region *rg, *nrg, *trg;
-
-       /* Locate the region we are either in or before. */
-       list_for_each_entry(rg, head, link)
-               if (f <= rg->to)
-                       break;
-
-       /* Round our left edge to the current segment if it encloses us. */
-       if (f > rg->from)
-               f = rg->from;
-
-       /* Check for and consume any regions we now overlap with. */
-       nrg = rg;
-       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-               if (&rg->link == head)
-                       break;
-               if (rg->from > t)
-                       break;
-
-               /* If this area reaches higher then extend our area to
-                * include it completely.  If this is not the first area
-                * which we intend to reuse, free it. */
-               if (rg->to > t)
-                       t = rg->to;
-               if (rg != nrg) {
-                       list_del(&rg->link);
-                       kfree(rg);
-               }
-       }
-       nrg->from = f;
-       nrg->to = t;
-       return 0;
-}
-
-static long region_chg(struct list_head *head, long f, long t)
-{
-       struct file_region *rg, *nrg;
-       long chg = 0;
-
-       /* Locate the region we are before or in. */
-       list_for_each_entry(rg, head, link)
-               if (f <= rg->to)
-                       break;
-
-       /* If we are below the current region then a new region is required.
-        * Subtle, allocate a new region at the position but make it zero
-        * size such that we can guarantee to record the reservation. */
-       if (&rg->link == head || t < rg->from) {
-               nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-               if (!nrg)
-                       return -ENOMEM;
-               nrg->from = f;
-               nrg->to   = f;
-               INIT_LIST_HEAD(&nrg->link);
-               list_add(&nrg->link, rg->link.prev);
-
-               return t - f;
-       }
-
-       /* Round our left edge to the current segment if it encloses us. */
-       if (f > rg->from)
-               f = rg->from;
-       chg = t - f;
-
-       /* Check for and consume any regions we now overlap with. */
-       list_for_each_entry(rg, rg->link.prev, link) {
-               if (&rg->link == head)
-                       break;
-               if (rg->from > t)
-                       return chg;
-
-               /* We overlap with this area, if it extends futher than
-                * us then we must extend ourselves.  Account for its
-                * existing reservation. */
-               if (rg->to > t) {
-                       chg += rg->to - t;
-                       t = rg->to;
-               }
-               chg -= rg->to - rg->from;
-       }
-       return chg;
-}
-
-static long region_truncate(struct list_head *head, long end)
-{
-       struct file_region *rg, *trg;
-       long chg = 0;
-
-       /* Locate the region we are either in or before. */
-       list_for_each_entry(rg, head, link)
-               if (end <= rg->to)
-                       break;
-       if (&rg->link == head)
-               return 0;
-
-       /* If we are in the middle of a region then adjust it. */
-       if (end > rg->from) {
-               chg = rg->to - end;
-               rg->to = end;
-               rg = list_entry(rg->link.next, typeof(*rg), link);
-       }
-
-       /* Drop any remaining regions. */
-       list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-               if (&rg->link == head)
-                       break;
-               chg += rg->to - rg->from;
-               list_del(&rg->link);
-               kfree(rg);
-       }
-       return chg;
-}
-
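
The file_region helpers removed here appear to be relocated earlier in the file rather than dropped, since the per-VMA reservation maps need them as well as the inode-based tracking. The property worth keeping in mind is what region_chg() returns: the number of extra huge pages a new [f, t) range must reserve on top of what is already recorded. A simplified user-space sketch of that net charge, assuming a sorted array of non-overlapping intervals in place of the linked list:

        #include <stdio.h>

        struct interval { long from, to; };     /* [from, to) in huge-page units */

        /* Net charge for reserving [f, t): the pages not already covered. */
        static long region_chg_sketch(const struct interval *r, int n, long f, long t)
        {
                long chg = t - f;                       /* charge for the whole range ... */

                for (int i = 0; i < n; i++) {
                        long lo = r[i].from > f ? r[i].from : f;
                        long hi = r[i].to < t ? r[i].to : t;
                        if (hi > lo)
                                chg -= hi - lo;         /* ... minus what is already reserved */
                }
                return chg;
        }

        int main(void)
        {
                /* Pages [0,3) and [8,12) are already reserved; a new mapping needs [2,10). */
                struct interval existing[] = { { 0, 3 }, { 8, 12 } };

                printf("additional pages to reserve: %ld\n",
                       region_chg_sketch(existing, 2, 2, 10));   /* (10-2) - 1 - 2 = 5 */
                return 0;
        }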
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma)
 {
        long ret, chg;
+       struct hstate *h = hstate_inode(inode);
+
+       if (vma && vma->vm_flags & VM_NORESERVE)
+               return 0;
 
        /*
         * Shared mappings base their reservation on the number of pages that
@@ -1533,8 +2131,13 @@ int hugetlb_reserve_pages(struct inode *inode,
        if (!vma || vma->vm_flags & VM_SHARED)
                chg = region_chg(&inode->i_mapping->private_list, from, to);
        else {
+               struct resv_map *resv_map = resv_map_alloc();
+               if (!resv_map)
+                       return -ENOMEM;
+
                chg = to - from;
-               set_vma_resv_huge_pages(vma, chg);
+
+               set_vma_resv_map(vma, resv_map);
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
@@ -1543,7 +2146,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 
        if (hugetlb_get_quota(inode->i_mapping, chg))
                return -ENOSPC;
-       ret = hugetlb_acct_memory(chg);
+       ret = hugetlb_acct_memory(h, chg);
        if (ret < 0) {
                hugetlb_put_quota(inode->i_mapping, chg);
                return ret;
@@ -1555,12 +2158,13 @@ int hugetlb_reserve_pages(struct inode *inode,
 
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
+       struct hstate *h = hstate_inode(inode);
        long chg = region_truncate(&inode->i_mapping->private_list, offset);
 
        spin_lock(&inode->i_lock);
-       inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
+       inode->i_blocks -= blocks_per_huge_page(h) * freed;
        spin_unlock(&inode->i_lock);
 
        hugetlb_put_quota(inode->i_mapping, (chg - freed));
-       hugetlb_acct_memory(-(chg - freed));
+       hugetlb_acct_memory(h, -(chg - freed));
 }