mm/hugetlb.c
index f5e85dabb7a32e146685274795e36e56ee52a770..8fb42c6dd74bf2bbb9a07f35d5c025bf633e2baf 100644
@@ -79,21 +79,6 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-       return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, 0);
-}
-
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
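Aside: the HPageFreed() names introduced just below (and the HPageMigratable()/HPageTemporary() names used in later hunks) come from a generic flag framework added to include/linux/hugetlb.h by a companion patch, which packs hugetlb-specific flags into bits of the head page's page->private instead of the ad-hoc tail-page encodings being deleted here. A paraphrased sketch of that framework follows; treat it as illustrative rather than a verbatim copy.

	/* Hugetlb-specific flags, stored as bits in the head page's private field. */
	enum hugetlb_page_flags {
		HPG_restore_reserve = 0,
		HPG_migratable,
		HPG_temporary,
		HPG_freed,
		__NR_HPAGEFLAGS,
	};

	#define TESTHPAGEFLAG(uname, flname)				\
	static inline bool HPage##uname(struct page *page)		\
		{ return test_bit(HPG_##flname, &(page->private)); }

	#define SETHPAGEFLAG(uname, flname)				\
	static inline void SetHPage##uname(struct page *page)		\
		{ set_bit(HPG_##flname, &(page->private)); }

	#define CLEARHPAGEFLAG(uname, flname)				\
	static inline void ClearHPage##uname(struct page *page)	\
		{ clear_bit(HPG_##flname, &(page->private)); }

	#define HPAGEFLAG(uname, flname)				\
		TESTHPAGEFLAG(uname, flname)				\
		SETHPAGEFLAG(uname, flname)				\
		CLEARHPAGEFLAG(uname, flname)

	HPAGEFLAG(RestoreReserve, restore_reserve)
	HPAGEFLAG(Migratable, migratable)
	HPAGEFLAG(Temporary, temporary)
	HPAGEFLAG(Freed, freed)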
@@ -1053,7 +1038,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
-       SetPageHugeFreed(page);
+       SetHPageFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1070,7 +1055,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
-               ClearPageHugeFreed(page);
+               ClearHPageFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1364,52 +1349,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
- * to hstate->hugepage_activelist.)
- *
- * This function can be called for tail pages, but never returns true for them.
- */
-bool page_huge_active(struct page *page)
-{
-       return PageHeadHuge(page) && PagePrivate(&page[1]);
-}
-
-/* never called for tail page */
-void set_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       SetPagePrivate(&page[1]);
-}
-
-static void clear_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       ClearPagePrivate(&page[1]);
-}
-
-/*
- * Internal hugetlb specific page flag. Do not use outside of the hugetlb
- * code
- */
-static inline bool PageHugeTemporary(struct page *page)
-{
-       if (!PageHuge(page))
-               return false;
-
-       return (unsigned long)page[2].mapping == -1U;
-}
-
-static inline void SetPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = (void *)-1U;
-}
-
-static inline void ClearPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = NULL;
-}
-
 static void __free_huge_page(struct page *page)
 {
        /*
@@ -1449,7 +1388,7 @@ static void __free_huge_page(struct page *page)
        }
 
        spin_lock(&hugetlb_lock);
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1457,9 +1396,9 @@ static void __free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (PageHugeTemporary(page)) {
+       if (HPageTemporary(page)) {
                list_del(&page->lru);
-               ClearPageHugeTemporary(page);
+               ClearHPageTemporary(page);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
@@ -1526,12 +1465,13 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+       hugetlb_set_page_subpool(page, NULL);
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
-       ClearPageHugeFreed(page);
+       ClearHPageFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
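Aside: the hugetlb_set_page_subpool() call added above replaces the old convention of stashing the subpool pointer directly in the head page's page->private, which now holds the flag bits. A hedged sketch of the accessors this hunk assumes, with the pointer relocated to the first tail page:

	/* Sketch of the subpool accessors assumed here; details are illustrative. */
	static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
	{
		return (struct hugepage_subpool *)page_private(hpage + 1);
	}

	static inline void hugetlb_set_page_subpool(struct page *hpage,
						struct hugepage_subpool *subpool)
	{
		set_page_private(hpage + 1, (unsigned long)subpool);
	}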
@@ -1802,7 +1742,7 @@ retry:
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!PageHugeFreed(head))) {
+               if (unlikely(!HPageFreed(head))) {
                        spin_unlock(&hugetlb_lock);
                        cond_resched();
 
@@ -1893,7 +1833,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
         * codeflow
         */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+               SetHPageTemporary(page);
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
@@ -1924,7 +1864,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       SetPageHugeTemporary(page);
+       SetHPageTemporary(page);
 
        return page;
 }
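For context, here is how the HPageTemporary mark set above pays off at free time, condensed from the __free_huge_page() hunk earlier in this diff: a temporary page bypasses surplus accounting entirely and is freed outright on the last reference.

	if (HPageTemporary(page)) {
		list_del(&page->lru);
		ClearHPageTemporary(page);
		update_and_free_page(h, page);	/* freed outright, no surplus math */
	}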
@@ -4218,7 +4158,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
-               set_page_huge_active(new_page);
+               SetHPageMigratable(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -4455,12 +4395,12 @@ retry:
        spin_unlock(ptl);
 
        /*
-        * Only make newly allocated pages active.  Existing pages found
-        * in the pagecache could be !page_huge_active() if they have been
-        * isolated for migration.
+        * Only set HPageMigratable in newly allocated pages.  Existing pages
+        * found in the pagecache may not have HPageMigratable set if they have
+        * been isolated for migration.
         */
        if (new_page)
-               set_page_huge_active(page);
+               SetHPageMigratable(page);
 
        unlock_page(page);
 out:
@@ -4771,7 +4711,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -5076,12 +5016,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        return pages << h->order;
 }
 
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise.  */
+bool hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
                                        vm_flags_t vm_flags)
 {
-       long ret, chg, add = -1;
+       long chg, add = -1;
        struct hstate *h = hstate_inode(inode);
        struct hugepage_subpool *spool = subpool_inode(inode);
        struct resv_map *resv_map;
@@ -5091,7 +5032,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        /* This should never happen */
        if (from > to) {
                VM_WARN(1, "%s called with a negative range\n", __func__);
-               return -EINVAL;
+               return false;
        }
 
        /*
@@ -5100,7 +5041,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
-               return 0;
+               return true;
 
        /*
         * Shared mappings base their reservation on the number of pages that
@@ -5122,7 +5063,7 @@ int hugetlb_reserve_pages(struct inode *inode,
                /* Private mapping. */
                resv_map = resv_map_alloc();
                if (!resv_map)
-                       return -ENOMEM;
+                       return false;
 
                chg = to - from;
 
@@ -5130,18 +5071,12 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0) {
-               ret = chg;
+       if (chg < 0)
                goto out_err;
-       }
-
-       ret = hugetlb_cgroup_charge_cgroup_rsvd(
-               hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
 
-       if (ret < 0) {
-               ret = -ENOMEM;
+       if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+                               chg * pages_per_huge_page(h), &h_cg) < 0)
                goto out_err;
-       }
 
        if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
                /* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5156,19 +5091,15 @@ int hugetlb_reserve_pages(struct inode *inode,
         * reservations already in place (gbl_reserve).
         */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-       if (gbl_reserve < 0) {
-               ret = -ENOSPC;
+       if (gbl_reserve < 0)
                goto out_uncharge_cgroup;
-       }
 
        /*
         * Check enough hugepages are available for the reservation.
         * Hand the pages back to the subpool if there are not
         */
-       ret = hugetlb_acct_memory(h, gbl_reserve);
-       if (ret < 0) {
+       if (hugetlb_acct_memory(h, gbl_reserve) < 0)
                goto out_put_pages;
-       }
 
        /*
         * Account for the reservations made. Shared mappings record regions
@@ -5186,7 +5117,6 @@ int hugetlb_reserve_pages(struct inode *inode,
 
                if (unlikely(add < 0)) {
                        hugetlb_acct_memory(h, -gbl_reserve);
-                       ret = add;
                        goto out_put_pages;
                } else if (unlikely(chg > add)) {
                        /*
@@ -5207,7 +5137,8 @@ int hugetlb_reserve_pages(struct inode *inode,
                        hugetlb_acct_memory(h, -rsv_adjust);
                }
        }
-       return 0;
+       return true;
+
 out_put_pages:
        /* put back original number of pages, chg */
        (void)hugepage_subpool_put_pages(spool, chg);
@@ -5223,7 +5154,7 @@ out_err:
                        region_abort(resv_map, from, to, regions_needed);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
-       return ret;
+       return false;
 }
 
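With the return type now bool, callers stop propagating a specific errno from hugetlb_reserve_pages() and simply test the result, supplying their own error code. A caller-side sketch, loosely modeled on hugetlbfs_file_mmap() (surrounding names are assumptions, not verbatim kernel code):

	ret = -ENOMEM;
	if (!hugetlb_reserve_pages(inode,
				   vma->vm_pgoff >> huge_page_order(h),
				   len >> huge_page_shift(h),
				   vma, vma->vm_flags))
		goto out;
	ret = 0;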
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -5610,12 +5541,13 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
        bool ret = true;
 
        spin_lock(&hugetlb_lock);
-       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
            !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -5625,7 +5557,7 @@ unlock:
 void putback_active_hugepage(struct page *page)
 {
        spin_lock(&hugetlb_lock);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
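Taken together, these two helpers implement the migration round-trip: isolation clears HPageMigratable and moves the page to the caller's private list; putback re-marks it and drops the isolation reference. A hedged usage sketch (caller shape is illustrative, not verbatim kernel code):

	LIST_HEAD(pagelist);

	if (isolate_huge_page(page, &pagelist)) {
		/* HPageMigratable is clear; page now sits on our private list. */
		/* ... attempt to migrate the page here ... */

		/* On failure, re-mark it migratable and return it to the hstate. */
		putback_active_hugepage(page);
	}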
@@ -5648,12 +5580,12 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (PageHugeTemporary(newpage)) {
+       if (HPageTemporary(newpage)) {
                int old_nid = page_to_nid(oldpage);
                int new_nid = page_to_nid(newpage);
 
-               SetPageHugeTemporary(oldpage);
-               ClearPageHugeTemporary(newpage);
+               SetHPageTemporary(oldpage);
+               ClearHPageTemporary(newpage);
 
                spin_lock(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {