diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 905a7d549b00f6099abb69682266eb49745b6700..8fb42c6dd74bf2bbb9a07f35d5c025bf633e2baf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,34 +79,29 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-       return page_private(head + 4) == -1UL;
-}
+/* Forward declaration */
+static int hugetlb_acct_memory(struct hstate *h, long delta);
 
-static inline void SetPageHugeFreed(struct page *head)
+static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
-       set_page_private(head + 4, -1UL);
-}
+       if (spool->count)
+               return false;
+       if (spool->max_hpages != -1)
+               return spool->used_hpages == 0;
+       if (spool->min_hpages != -1)
+               return spool->rsv_hpages == spool->min_hpages;
 
-static inline void ClearPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, 0);
+       return true;
 }
 
-/* Forward declaration */
-static int hugetlb_acct_memory(struct hstate *h, long delta);
-
 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
 {
-       bool free = (spool->count == 0) && (spool->used_hpages == 0);
-
        spin_unlock(&spool->lock);
 
        /* If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool */
-       if (free) {
+       if (subpool_is_free(spool)) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
@@ -1043,7 +1038,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
-       SetPageHugeFreed(page);
+       SetHPageFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1060,7 +1055,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
-               ClearPageHugeFreed(page);
+               ClearHPageFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1133,7 +1128,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-               SetPagePrivate(page);
+               SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
        }
 
@@ -1224,8 +1219,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        struct page *p = page + 1;
 
        atomic_set(compound_mapcount_ptr(page), 0);
-       if (hpage_pincount_available(page))
-               atomic_set(compound_pincount_ptr(page), 0);
+       atomic_set(compound_pincount_ptr(page), 0);
 
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
@@ -1312,14 +1306,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
+       struct page *subpage = page;
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+       for (i = 0; i < pages_per_huge_page(h);
+            i++, subpage = mem_map_next(subpage, page, i)) {
+               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
@@ -1353,52 +1349,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
- * to hstate->hugepage_activelist.)
- *
- * This function can be called for tail pages, but never returns true for them.
- */
-bool page_huge_active(struct page *page)
-{
-       return PageHeadHuge(page) && PagePrivate(&page[1]);
-}
-
-/* never called for tail page */
-void set_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       SetPagePrivate(&page[1]);
-}
-
-static void clear_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       ClearPagePrivate(&page[1]);
-}
-
-/*
- * Internal hugetlb specific page flag. Do not use outside of the hugetlb
- * code
- */
-static inline bool PageHugeTemporary(struct page *page)
-{
-       if (!PageHuge(page))
-               return false;
-
-       return (unsigned long)page[2].mapping == -1U;
-}
-
-static inline void SetPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = (void *)-1U;
-}
-
-static inline void ClearPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = NULL;
-}
-
 static void __free_huge_page(struct page *page)
 {
        /*
@@ -1407,24 +1357,23 @@ static void __free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct hugepage_subpool *spool =
-               (struct hugepage_subpool *)page_private(page);
+       struct hugepage_subpool *spool = hugetlb_page_subpool(page);
        bool restore_reserve;
 
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
 
-       set_page_private(page, 0);
+       hugetlb_set_page_subpool(page, NULL);
        page->mapping = NULL;
-       restore_reserve = PagePrivate(page);
-       ClearPagePrivate(page);
+       restore_reserve = HPageRestoreReserve(page);
+       ClearHPageRestoreReserve(page);
 
        /*
-        * If PagePrivate() was set on page, page allocation consumed a
+        * If HPageRestoreReserve was set on page, page allocation consumed a
         * reservation.  If the page was associated with a subpool, there
         * would have been a page reserved in the subpool before allocation
         * via hugepage_subpool_get_pages().  Since we are 'restoring' the
-        * reservtion, do not call hugepage_subpool_put_pages() as this will
+        * reservation, do not call hugepage_subpool_put_pages() as this will
         * remove the reserved page from the subpool.
         */
        if (!restore_reserve) {
@@ -1439,7 +1388,7 @@ static void __free_huge_page(struct page *page)
        }
 
        spin_lock(&hugetlb_lock);
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1447,9 +1396,9 @@ static void __free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (PageHugeTemporary(page)) {
+       if (HPageTemporary(page)) {
                list_del(&page->lru);
-               ClearPageHugeTemporary(page);
+               ClearHPageTemporary(page);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
@@ -1516,12 +1465,13 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+       hugetlb_set_page_subpool(page, NULL);
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
-       ClearPageHugeFreed(page);
+       ClearHPageFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
@@ -1553,9 +1503,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
                set_compound_head(p, page);
        }
        atomic_set(compound_mapcount_ptr(page), -1);
-
-       if (hpage_pincount_available(page))
-               atomic_set(compound_pincount_ptr(page), 0);
+       atomic_set(compound_pincount_ptr(page), 0);
 }
 
 /*
@@ -1794,7 +1742,7 @@ retry:
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!PageHugeFreed(head))) {
+               if (unlikely(!HPageFreed(head))) {
                        spin_unlock(&hugetlb_lock);
                        cond_resched();
 
@@ -1885,7 +1833,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
         * codeflow
         */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+               SetHPageTemporary(page);
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
@@ -1916,7 +1864,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       SetPageHugeTemporary(page);
+       SetHPageTemporary(page);
 
        return page;
 }
@@ -2254,24 +2202,24 @@ static long vma_add_reservation(struct hstate *h,
  * This routine is called to restore a reservation on error paths.  In the
  * specific error paths, a huge page was allocated (via alloc_huge_page)
  * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set PagePrivate
- * in the newly allocated page.  When the page is freed via free_huge_page,
- * the global reservation count will be incremented if PagePrivate is set.
- * However, free_huge_page can not adjust the reserve map.  Adjust the
- * reserve map here to be consistent with global reserve count adjustments
- * to be made by free_huge_page.
+ * alloc_huge_page would have consumed the reservation and set
+ * HPageRestoreReserve in the newly allocated page.  When the page is freed
+ * via free_huge_page, the global reservation count will be incremented if
+ * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+ * reserve map.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.
  */
 static void restore_reserve_on_error(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address,
                        struct page *page)
 {
-       if (unlikely(PagePrivate(page))) {
+       if (unlikely(HPageRestoreReserve(page))) {
                long rc = vma_needs_reservation(h, vma, address);
 
                if (unlikely(rc < 0)) {
                        /*
                         * Rare out of memory condition in reserve map
-                        * manipulation.  Clear PagePrivate so that
+                        * manipulation.  Clear HPageRestoreReserve so that
                         * global reserve count will not be incremented
                         * by free_huge_page.  This will make it appear
                         * as though the reservation for this page was
@@ -2280,7 +2228,7 @@ static void restore_reserve_on_error(struct hstate *h,
                         * is better than inconsistent global huge page
                         * accounting of reserve counts.
                         */
-                       ClearPagePrivate(page);
+                       ClearHPageRestoreReserve(page);
                } else if (rc) {
                        rc = vma_add_reservation(h, vma, address);
                        if (unlikely(rc < 0))
@@ -2288,7 +2236,7 @@ static void restore_reserve_on_error(struct hstate *h,
                                 * See above comment about rare out of
                                 * memory condition.
                                 */
-                               ClearPagePrivate(page);
+                               ClearHPageRestoreReserve(page);
                } else
                        vma_end_reservation(h, vma, address);
        }
@@ -2369,7 +2317,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                if (!page)
                        goto out_uncharge_cgroup;
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetPagePrivate(page);
+                       SetHPageRestoreReserve(page);
                        h->resv_huge_pages--;
                }
                spin_lock(&hugetlb_lock);
@@ -2387,7 +2335,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        spin_unlock(&hugetlb_lock);
 
-       set_page_private(page, (unsigned long)spool);
+       hugetlb_set_page_subpool(page, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
@@ -2476,7 +2424,7 @@ static void __init gather_bootmem_prealloc(void)
                struct hstate *h = m->hstate;
 
                WARN_ON(page_count(page) != 1);
-               prep_compound_huge_page(page, h->order);
+               prep_compound_huge_page(page, huge_page_order(h));
                WARN_ON(PageReserved(page));
                prep_new_huge_page(h, page, page_to_nid(page));
                put_page(page); /* free it into the hugepage allocator */
@@ -2488,7 +2436,7 @@ static void __init gather_bootmem_prealloc(void)
                 * side-effects, like CommitLimit going negative.
                 */
                if (hstate_is_gigantic(h))
-                       adjust_managed_page_count(page, 1 << h->order);
+                       adjust_managed_page_count(page, pages_per_huge_page(h));
                cond_resched();
        }
 }
@@ -2520,7 +2468,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                if (hstate_is_gigantic(h)) {
                        if (hugetlb_cma_size) {
                                pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
-                               break;
+                               goto free;
                        }
                        if (!alloc_bootmem_huge_page(h))
                                break;
@@ -2538,7 +2486,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                        h->max_huge_pages, buf, i);
                h->max_huge_pages = i;
        }
-
+free:
        kfree(node_alloc_noretry);
 }
 
@@ -2988,8 +2936,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
                return -ENOMEM;
 
        retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
-       if (retval)
+       if (retval) {
                kobject_put(hstate_kobjs[hi]);
+               hstate_kobjs[hi] = NULL;
+       }
 
        return retval;
 }
@@ -3159,6 +3109,9 @@ static int __init hugetlb_init(void)
 {
        int i;
 
+       BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
+                       __NR_HPAGEFLAGS);
+
        if (!hugepages_supported()) {
                if (hugetlb_max_hstate || default_hstate_max_huge_pages)
                        pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
@@ -3239,7 +3192,7 @@ void __init hugetlb_add_hstate(unsigned int order)
        BUG_ON(order == 0);
        h = &hstates[hugetlb_max_hstate++];
        h->order = order;
-       h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+       h->mask = ~(huge_page_size(h) - 1);
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
@@ -3408,8 +3361,7 @@ static unsigned int allowed_mems_nr(struct hstate *h)
        mpol_allowed = policy_nodemask_current(gfp_mask);
 
        for_each_node_mask(node, cpuset_current_mems_allowed) {
-               if (!mpol_allowed ||
-                   (mpol_allowed && node_isset(node, *mpol_allowed)))
+               if (!mpol_allowed || node_isset(node, *mpol_allowed))
                        nr += array[node];
        }
 
@@ -3515,7 +3467,7 @@ void hugetlb_report_meminfo(struct seq_file *m)
        for_each_hstate(h) {
                unsigned long count = h->nr_huge_pages;
 
-               total += (PAGE_SIZE << huge_page_order(h)) * count;
+               total += huge_page_size(h) * count;
 
                if (h == &default_hstate)
                        seq_printf(m,
@@ -3528,10 +3480,10 @@ void hugetlb_report_meminfo(struct seq_file *m)
                                   h->free_huge_pages,
                                   h->resv_huge_pages,
                                   h->surplus_huge_pages,
-                                  (PAGE_SIZE << huge_page_order(h)) / 1024);
+                                  huge_page_size(h) / SZ_1K);
        }
 
-       seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
+       seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
 }
 
 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
@@ -3565,7 +3517,7 @@ void hugetlb_show_meminfo(void)
                                h->nr_huge_pages_node[nid],
                                h->free_huge_pages_node[nid],
                                h->surplus_huge_pages_node[nid],
-                               1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+                               huge_page_size(h) / SZ_1K);
 }
 
 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
@@ -3589,6 +3541,9 @@ static int hugetlb_acct_memory(struct hstate *h, long delta)
 {
        int ret = -ENOMEM;
 
+       if (!delta)
+               return 0;
+
        spin_lock(&hugetlb_lock);
        /*
         * When cpuset is configured, it breaks the strict hugetlb page
@@ -3685,15 +3640,13 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 
 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
 {
-       struct hstate *hstate = hstate_vma(vma);
-
-       return 1UL << huge_page_shift(hstate);
+       return huge_page_size(hstate_vma(vma));
 }
 
 /*
  * We cannot handle pagefaults against hugetlb pages at all.  They cause
  * handle_mm_fault() to try to instantiate regular-sized pages in the
- * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
+ * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
  * this far.
  */
 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
@@ -4017,7 +3970,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 /*
  * This is called when the original mapper is failing to COW a MAP_PRIVATE
- * mappping it owns the reserve page for. The intention is to unmap the page
+ * mapping it owns the reserve page for. The intention is to unmap the page
  * from other VMAs and let the children be SIGKILLed if they are faulting the
  * same region.
  */
@@ -4196,7 +4149,7 @@ retry_avoidcopy:
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-               ClearPagePrivate(new_page);
+               ClearHPageRestoreReserve(new_page);
 
                /* Break COW */
                huge_ptep_clear_flush(vma, haddr, ptep);
@@ -4205,7 +4158,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
-               set_page_huge_active(new_page);
+               SetHPageMigratable(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -4263,7 +4216,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 
        if (err)
                return err;
-       ClearPagePrivate(page);
+       ClearHPageRestoreReserve(page);
 
        /*
         * set page dirty so that it will not be removed from cache/file
@@ -4425,7 +4378,7 @@ retry:
                goto backout;
 
        if (anon_rmap) {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, vma, haddr);
        } else
                page_dup_rmap(page, true);
@@ -4442,12 +4395,12 @@ retry:
        spin_unlock(ptl);
 
        /*
-        * Only make newly allocated pages active.  Existing pages found
-        * in the pagecache could be !page_huge_active() if they have been
-        * isolated for migration.
+        * Only set HPageMigratable in newly allocated pages.  Existing pages
+        * found in the pagecache may not have HPageMigratable set if they have
+        * been isolated for migration.
         */
        if (new_page)
-               set_page_huge_active(page);
+               SetHPageMigratable(page);
 
        unlock_page(page);
 out:
@@ -4477,7 +4430,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
 }
 #else
 /*
- * For uniprocesor systems we always use a single mutex, so just
+ * For uniprocessor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
@@ -4739,7 +4692,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (vm_shared) {
                page_dup_rmap(page, true);
        } else {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
        }
 
@@ -4758,7 +4711,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -4773,6 +4726,20 @@ out_release_nounlock:
        goto out;
 }
 
+static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
+                                int refs, struct page **pages,
+                                struct vm_area_struct **vmas)
+{
+       int nr;
+
+       for (nr = 0; nr < refs; nr++) {
+               if (likely(pages))
+                       pages[nr] = mem_map_offset(page, nr);
+               if (vmas)
+                       vmas[nr] = vma;
+       }
+}
+
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         struct page **pages, struct vm_area_struct **vmas,
                         unsigned long *position, unsigned long *nr_pages,
@@ -4782,7 +4749,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long vaddr = *position;
        unsigned long remainder = *nr_pages;
        struct hstate *h = hstate_vma(vma);
-       int err = -EFAULT;
+       int err = -EFAULT, refs;
 
        while (vaddr < vma->vm_end && remainder) {
                pte_t *pte;
@@ -4902,20 +4869,29 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        continue;
                }
 
-same_page:
+               refs = min3(pages_per_huge_page(h) - pfn_offset,
+                           (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+
+               if (pages || vmas)
+                       record_subpages_vmas(mem_map_offset(page, pfn_offset),
+                                            vma, refs,
+                                            likely(pages) ? pages + i : NULL,
+                                            vmas ? vmas + i : NULL);
+
                if (pages) {
-                       pages[i] = mem_map_offset(page, pfn_offset);
                        /*
-                        * try_grab_page() should always succeed here, because:
-                        * a) we hold the ptl lock, and b) we've just checked
-                        * that the huge page is present in the page tables. If
-                        * the huge page is present, then the tail pages must
-                        * also be present. The ptl prevents the head page and
-                        * tail pages from being rearranged in any way. So this
-                        * page must be available at this point, unless the page
-                        * refcount overflowed:
+                        * try_grab_compound_head() should always succeed here,
+                        * because: a) we hold the ptl lock, and b) we've just
+                        * checked that the huge page is present in the page
+                        * tables. If the huge page is present, then the tail
+                        * pages must also be present. The ptl prevents the
+                        * head page and tail pages from being rearranged in
+                        * any way. So this page must be available at this
+                        * point, unless the page refcount overflowed:
                         */
-                       if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
+                       if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
+                                                                refs,
+                                                                flags))) {
                                spin_unlock(ptl);
                                remainder = 0;
                                err = -ENOMEM;
@@ -4923,21 +4899,10 @@ same_page:
                        }
                }
 
-               if (vmas)
-                       vmas[i] = vma;
-
-               vaddr += PAGE_SIZE;
-               ++pfn_offset;
-               --remainder;
-               ++i;
-               if (vaddr < vma->vm_end && remainder &&
-                               pfn_offset < pages_per_huge_page(h)) {
-                       /*
-                        * We use pfn_offset to avoid touching the pageframes
-                        * of this compound page.
-                        */
-                       goto same_page;
-               }
+               vaddr += (refs << PAGE_SHIFT);
+               remainder -= refs;
+               i += refs;
+
                spin_unlock(ptl);
        }
        *nr_pages = remainder;
@@ -5051,12 +5016,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        return pages << h->order;
 }
 
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise.  */
+bool hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
                                        vm_flags_t vm_flags)
 {
-       long ret, chg, add = -1;
+       long chg, add = -1;
        struct hstate *h = hstate_inode(inode);
        struct hugepage_subpool *spool = subpool_inode(inode);
        struct resv_map *resv_map;
@@ -5066,7 +5032,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        /* This should never happen */
        if (from > to) {
                VM_WARN(1, "%s called with a negative range\n", __func__);
-               return -EINVAL;
+               return false;
        }
 
        /*
@@ -5075,7 +5041,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
-               return 0;
+               return true;
 
        /*
         * Shared mappings base their reservation on the number of pages that
@@ -5097,7 +5063,7 @@ int hugetlb_reserve_pages(struct inode *inode,
                /* Private mapping. */
                resv_map = resv_map_alloc();
                if (!resv_map)
-                       return -ENOMEM;
+                       return false;
 
                chg = to - from;
 
@@ -5105,18 +5071,12 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0) {
-               ret = chg;
+       if (chg < 0)
                goto out_err;
-       }
 
-       ret = hugetlb_cgroup_charge_cgroup_rsvd(
-               hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
-
-       if (ret < 0) {
-               ret = -ENOMEM;
+       if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+                               chg * pages_per_huge_page(h), &h_cg) < 0)
                goto out_err;
-       }
 
        if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
                /* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5131,19 +5091,15 @@ int hugetlb_reserve_pages(struct inode *inode,
         * reservations already in place (gbl_reserve).
         */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-       if (gbl_reserve < 0) {
-               ret = -ENOSPC;
+       if (gbl_reserve < 0)
                goto out_uncharge_cgroup;
-       }
 
        /*
         * Check enough hugepages are available for the reservation.
         * Hand the pages back to the subpool if there are not
         */
-       ret = hugetlb_acct_memory(h, gbl_reserve);
-       if (ret < 0) {
+       if (hugetlb_acct_memory(h, gbl_reserve) < 0)
                goto out_put_pages;
-       }
 
        /*
         * Account for the reservations made. Shared mappings record regions
@@ -5161,7 +5117,6 @@ int hugetlb_reserve_pages(struct inode *inode,
 
                if (unlikely(add < 0)) {
                        hugetlb_acct_memory(h, -gbl_reserve);
-                       ret = add;
                        goto out_put_pages;
                } else if (unlikely(chg > add)) {
                        /*
@@ -5182,7 +5137,8 @@ int hugetlb_reserve_pages(struct inode *inode,
                        hugetlb_acct_memory(h, -rsv_adjust);
                }
        }
-       return 0;
+       return true;
+
 out_put_pages:
        /* put back original number of pages, chg */
        (void)hugepage_subpool_put_pages(spool, chg);
@@ -5198,7 +5154,7 @@ out_err:
                        region_abort(resv_map, from, to, regions_needed);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
-       return ret;
+       return false;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -5259,7 +5215,7 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vm_flags != svm_flags ||
-           sbase < svma->vm_start || svma->vm_end < s_end)
+           !range_in_vma(svma, sbase, s_end))
                return 0;
 
        return saddr;
@@ -5286,21 +5242,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
 {
-       unsigned long a_start, a_end;
+       unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
+               v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
 
-       if (!(vma->vm_flags & VM_MAYSHARE))
+       /*
+        * The vma needs to span at least one aligned PUD size, and the
+        * start,end range must fall at least partially within it.
+        */
+       if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
+               (*end <= v_start) || (*start >= v_end))
                return;
 
        /* Extend the range to be PUD aligned for a worst case scenario */
-       a_start = ALIGN_DOWN(*start, PUD_SIZE);
-       a_end = ALIGN(*end, PUD_SIZE);
+       if (*start > v_start)
+               *start = ALIGN_DOWN(*start, PUD_SIZE);
 
-       /*
-        * Intersect the range with the vma range, since pmd sharing won't be
-        * across vma after all
-        */
-       *start = max(vma->vm_start, a_start);
-       *end = min(vma->vm_end, a_end);
+       if (*end < v_end)
+               *end = ALIGN(*end, PUD_SIZE);
 }
 
 /*
@@ -5583,12 +5541,13 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
        bool ret = true;
 
        spin_lock(&hugetlb_lock);
-       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
            !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -5597,9 +5556,8 @@ unlock:
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
@@ -5622,12 +5580,12 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (PageHugeTemporary(newpage)) {
+       if (HPageTemporary(newpage)) {
                int old_nid = page_to_nid(oldpage);
                int new_nid = page_to_nid(newpage);
 
-               SetPageHugeTemporary(oldpage);
-               ClearPageHugeTemporary(newpage);
+               SetHPageTemporary(oldpage);
+               ClearHPageTemporary(newpage);
 
                spin_lock(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {
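
For reference, the HPage* helpers this diff switches to (HPageFreed, HPageTemporary, HPageRestoreReserve, HPageMigratable) and the hugetlb_page_subpool()/hugetlb_set_page_subpool() accessors are defined outside this file. The BUILD_BUG_ON added to hugetlb_init() suggests the flags are stored as bits in the head page's page->private field. Below is a minimal sketch of how such helpers could look, presumably following an include/linux/hugetlb.h-style macro scheme; the names, comments, and the exact location of the subpool pointer are assumptions for illustration, not part of this diff.

/*
 * Sketch only: hugetlb-specific flags kept as bits in the head page's
 * page->private, which is why hugetlb_init() checks that the field is
 * wide enough to hold __NR_HPAGEFLAGS bits.
 */
#include <linux/bitops.h>
#include <linux/mm.h>

struct hugepage_subpool;

enum hugetlb_page_flags {
	HPG_restore_reserve = 0,	/* page allocation consumed a reservation */
	HPG_migratable,			/* page may be isolated for migration */
	HPG_temporary,			/* temporary/surplus page, freed on last ref */
	HPG_freed,			/* page sits on a hugetlb free list */
	__NR_HPAGEFLAGS,
};

/* Generate HPageFoo()/SetHPageFoo()/ClearHPageFoo() for each flag. */
#define TESTHPAGEFLAG(uname, flname)					\
static inline int HPage##uname(struct page *page)			\
	{ return test_bit(HPG_##flname, &page->private); }

#define SETHPAGEFLAG(uname, flname)					\
static inline void SetHPage##uname(struct page *page)			\
	{ set_bit(HPG_##flname, &page->private); }

#define CLEARHPAGEFLAG(uname, flname)					\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &page->private); }

#define HPAGEFLAG(uname, flname)					\
	TESTHPAGEFLAG(uname, flname)					\
	SETHPAGEFLAG(uname, flname)					\
	CLEARHPAGEFLAG(uname, flname)

HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)

/*
 * With the head page's private field now holding flags, the subpool
 * pointer (previously page_private(page), see the removed hunks above)
 * is assumed to move to a tail page's private field.
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (struct hugepage_subpool *)(hpage + 1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					    struct hugepage_subpool *subpool)
{
	set_page_private(hpage + 1, (unsigned long)subpool);
}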