diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4c956e64a10c7f64f37c185755564cf39e32383c..8fb42c6dd74bf2bbb9a07f35d5c025bf633e2baf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -79,21 +79,6 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-       return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
-       set_page_private(head + 4, 0);
-}
-
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
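The hugetlb-private flag helpers removed above (and the PageHugeTemporary()/page_huge_active() helpers removed further down) are replaced by generated HPage*() accessors that keep all such flags in the head page's page->private field. Those accessors come from a companion change to include/linux/hugetlb.h that is not part of this file's diff; the following is a minimal sketch of that scheme, reconstructed from context (the HPG_* names and the HPAGEFLAG() macros are assumptions taken from that companion header change):

	enum hugetlb_page_flags {
		HPG_restore_reserve = 0,
		HPG_migratable,
		HPG_temporary,
		HPG_freed,
		__NR_HPAGEFLAGS,
	};

	/* each flag gets test/set/clear wrappers operating on the head page's ->private */
	#define TESTHPAGEFLAG(uname, flname)				\
	static inline int HPage##uname(struct page *page)		\
		{ return test_bit(HPG_##flname, &(page->private)); }

	#define SETHPAGEFLAG(uname, flname)				\
	static inline void SetHPage##uname(struct page *page)		\
		{ set_bit(HPG_##flname, &(page->private)); }

	#define CLEARHPAGEFLAG(uname, flname)				\
	static inline void ClearHPage##uname(struct page *page)	\
		{ clear_bit(HPG_##flname, &(page->private)); }

	#define HPAGEFLAG(uname, flname)				\
		TESTHPAGEFLAG(uname, flname)				\
		SETHPAGEFLAG(uname, flname)				\
		CLEARHPAGEFLAG(uname, flname)

	HPAGEFLAG(RestoreReserve, restore_reserve)
	HPAGEFLAG(Migratable, migratable)
	HPAGEFLAG(Temporary, temporary)
	HPAGEFLAG(Freed, freed)

The BUILD_BUG_ON() added in hugetlb_init() below guards exactly this layout: page->private must provide at least __NR_HPAGEFLAGS bits.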
@@ -1053,7 +1038,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
-       SetPageHugeFreed(page);
+       SetHPageFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1070,7 +1055,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
-               ClearPageHugeFreed(page);
+               ClearHPageFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1143,7 +1128,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-               SetPagePrivate(page);
+               SetHPageRestoreReserve(page);
                h->resv_huge_pages--;
        }
 
@@ -1321,14 +1306,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
 static void update_and_free_page(struct hstate *h, struct page *page)
 {
        int i;
+       struct page *subpage = page;
 
        if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                return;
 
        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
-       for (i = 0; i < pages_per_huge_page(h); i++) {
-               page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+       for (i = 0; i < pages_per_huge_page(h);
+            i++, subpage = mem_map_next(subpage, page, i)) {
+               subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
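The loop in update_and_free_page() now advances with mem_map_next() instead of plain page[i] indexing: for gigantic pages the struct page array is not guaranteed to be virtually contiguous (e.g. SPARSEMEM without VMEMMAP), so pointer arithmetic across a section boundary is unsafe. For reference, mem_map_next() from mm/internal.h looks roughly like the sketch below (not part of this diff):

	static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
	{
		/* re-derive the pointer at MAX_ORDER boundaries, otherwise just step forward */
		if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
			unsigned long pfn = page_to_pfn(base) + offset;

			if (!pfn_valid(pfn))
				return NULL;
			return pfn_to_page(pfn);
		}
		return iter + 1;
	}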
@@ -1362,52 +1349,6 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-/*
- * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
- * to hstate->hugepage_activelist.)
- *
- * This function can be called for tail pages, but never returns true for them.
- */
-bool page_huge_active(struct page *page)
-{
-       return PageHeadHuge(page) && PagePrivate(&page[1]);
-}
-
-/* never called for tail page */
-void set_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       SetPagePrivate(&page[1]);
-}
-
-static void clear_page_huge_active(struct page *page)
-{
-       VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
-       ClearPagePrivate(&page[1]);
-}
-
-/*
- * Internal hugetlb specific page flag. Do not use outside of the hugetlb
- * code
- */
-static inline bool PageHugeTemporary(struct page *page)
-{
-       if (!PageHuge(page))
-               return false;
-
-       return (unsigned long)page[2].mapping == -1U;
-}
-
-static inline void SetPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = (void *)-1U;
-}
-
-static inline void ClearPageHugeTemporary(struct page *page)
-{
-       page[2].mapping = NULL;
-}
-
 static void __free_huge_page(struct page *page)
 {
        /*
@@ -1416,20 +1357,19 @@ static void __free_huge_page(struct page *page)
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
-       struct hugepage_subpool *spool =
-               (struct hugepage_subpool *)page_private(page);
+       struct hugepage_subpool *spool = hugetlb_page_subpool(page);
        bool restore_reserve;
 
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(page_mapcount(page), page);
 
-       set_page_private(page, 0);
+       hugetlb_set_page_subpool(page, NULL);
        page->mapping = NULL;
-       restore_reserve = PagePrivate(page);
-       ClearPagePrivate(page);
+       restore_reserve = HPageRestoreReserve(page);
+       ClearHPageRestoreReserve(page);
 
        /*
-        * If PagePrivate() was set on page, page allocation consumed a
+        * If HPageRestoreReserve was set on page, page allocation consumed a
         * reservation.  If the page was associated with a subpool, there
         * would have been a page reserved in the subpool before allocation
         * via hugepage_subpool_get_pages().  Since we are 'restoring' the
@@ -1448,7 +1388,7 @@ static void __free_huge_page(struct page *page)
        }
 
        spin_lock(&hugetlb_lock);
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                     pages_per_huge_page(h), page);
        hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
@@ -1456,9 +1396,9 @@ static void __free_huge_page(struct page *page)
        if (restore_reserve)
                h->resv_huge_pages++;
 
-       if (PageHugeTemporary(page)) {
+       if (HPageTemporary(page)) {
                list_del(&page->lru);
-               ClearPageHugeTemporary(page);
+               ClearHPageTemporary(page);
                update_and_free_page(h, page);
        } else if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
@@ -1525,12 +1465,13 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+       hugetlb_set_page_subpool(page, NULL);
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
-       ClearPageHugeFreed(page);
+       ClearHPageFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
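Because the head page's page->private now carries the HPage* flags, the subpool pointer that used to live there moves to the first tail page, reached through hugetlb_page_subpool()/hugetlb_set_page_subpool() as used in __free_huge_page(), prep_new_huge_page() and alloc_huge_page() in this diff. A sketch of those helpers, assuming the include/linux/hugetlb.h side of the series (the page[1] placement is an assumption from that companion change):

	static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
	{
		/* subpool pointer is kept in the first tail page's ->private */
		return (struct hugepage_subpool *)(hpage + 1)->private;
	}

	static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
	{
		set_page_private(hpage + 1, (unsigned long)subpool);
	}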
@@ -1801,7 +1742,7 @@ retry:
                 * We should make sure that the page is already on the free list
                 * when it is dissolved.
                 */
-               if (unlikely(!PageHugeFreed(head))) {
+               if (unlikely(!HPageFreed(head))) {
                        spin_unlock(&hugetlb_lock);
                        cond_resched();
 
@@ -1892,7 +1833,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
         * codeflow
         */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
-               SetPageHugeTemporary(page);
+               SetHPageTemporary(page);
                spin_unlock(&hugetlb_lock);
                put_page(page);
                return NULL;
@@ -1923,7 +1864,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
         * We do not account these pages as surplus because they are only
         * temporary and will be released properly on the last reference
         */
-       SetPageHugeTemporary(page);
+       SetHPageTemporary(page);
 
        return page;
 }
@@ -2261,24 +2202,24 @@ static long vma_add_reservation(struct hstate *h,
  * This routine is called to restore a reservation on error paths.  In the
  * specific error paths, a huge page was allocated (via alloc_huge_page)
  * and is about to be freed.  If a reservation for the page existed,
- * alloc_huge_page would have consumed the reservation and set PagePrivate
- * in the newly allocated page.  When the page is freed via free_huge_page,
- * the global reservation count will be incremented if PagePrivate is set.
- * However, free_huge_page can not adjust the reserve map.  Adjust the
- * reserve map here to be consistent with global reserve count adjustments
- * to be made by free_huge_page.
+ * alloc_huge_page would have consumed the reservation and set
+ * HPageRestoreReserve in the newly allocated page.  When the page is freed
+ * via free_huge_page, the global reservation count will be incremented if
+ * HPageRestoreReserve is set.  However, free_huge_page can not adjust the
+ * reserve map.  Adjust the reserve map here to be consistent with global
+ * reserve count adjustments to be made by free_huge_page.
  */
 static void restore_reserve_on_error(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address,
                        struct page *page)
 {
-       if (unlikely(PagePrivate(page))) {
+       if (unlikely(HPageRestoreReserve(page))) {
                long rc = vma_needs_reservation(h, vma, address);
 
                if (unlikely(rc < 0)) {
                        /*
                         * Rare out of memory condition in reserve map
-                        * manipulation.  Clear PagePrivate so that
+                        * manipulation.  Clear HPageRestoreReserve so that
                         * global reserve count will not be incremented
                         * by free_huge_page.  This will make it appear
                         * as though the reservation for this page was
@@ -2287,7 +2228,7 @@ static void restore_reserve_on_error(struct hstate *h,
                         * is better than inconsistent global huge page
                         * accounting of reserve counts.
                         */
-                       ClearPagePrivate(page);
+                       ClearHPageRestoreReserve(page);
                } else if (rc) {
                        rc = vma_add_reservation(h, vma, address);
                        if (unlikely(rc < 0))
@@ -2295,7 +2236,7 @@ static void restore_reserve_on_error(struct hstate *h,
                                 * See above comment about rare out of
                                 * memory condition.
                                 */
-                               ClearPagePrivate(page);
+                               ClearHPageRestoreReserve(page);
                } else
                        vma_end_reservation(h, vma, address);
        }
@@ -2376,7 +2317,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
                if (!page)
                        goto out_uncharge_cgroup;
                if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-                       SetPagePrivate(page);
+                       SetHPageRestoreReserve(page);
                        h->resv_huge_pages--;
                }
                spin_lock(&hugetlb_lock);
@@ -2394,7 +2335,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        spin_unlock(&hugetlb_lock);
 
-       set_page_private(page, (unsigned long)spool);
+       hugetlb_set_page_subpool(page, spool);
 
        map_commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(map_chg > map_commit)) {
@@ -2527,7 +2468,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                if (hstate_is_gigantic(h)) {
                        if (hugetlb_cma_size) {
                                pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
-                               break;
+                               goto free;
                        }
                        if (!alloc_bootmem_huge_page(h))
                                break;
@@ -2545,7 +2486,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
                        h->max_huge_pages, buf, i);
                h->max_huge_pages = i;
        }
-
+free:
        kfree(node_alloc_noretry);
 }
 
@@ -3168,6 +3109,9 @@ static int __init hugetlb_init(void)
 {
        int i;
 
+       BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
+                       __NR_HPAGEFLAGS);
+
        if (!hugepages_supported()) {
                if (hugetlb_max_hstate || default_hstate_max_huge_pages)
                        pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
@@ -3248,7 +3192,7 @@ void __init hugetlb_add_hstate(unsigned int order)
        BUG_ON(order == 0);
        h = &hstates[hugetlb_max_hstate++];
        h->order = order;
-       h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
+       h->mask = ~(huge_page_size(h) - 1);
        for (i = 0; i < MAX_NUMNODES; ++i)
                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
        INIT_LIST_HEAD(&h->hugepage_activelist);
@@ -3523,7 +3467,7 @@ void hugetlb_report_meminfo(struct seq_file *m)
        for_each_hstate(h) {
                unsigned long count = h->nr_huge_pages;
 
-               total += (PAGE_SIZE << huge_page_order(h)) * count;
+               total += huge_page_size(h) * count;
 
                if (h == &default_hstate)
                        seq_printf(m,
@@ -3536,10 +3480,10 @@ void hugetlb_report_meminfo(struct seq_file *m)
                                   h->free_huge_pages,
                                   h->resv_huge_pages,
                                   h->surplus_huge_pages,
-                                  (PAGE_SIZE << huge_page_order(h)) / 1024);
+                                  huge_page_size(h) / SZ_1K);
        }
 
-       seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
+       seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
 }
 
 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
@@ -3573,7 +3517,7 @@ void hugetlb_show_meminfo(void)
                                h->nr_huge_pages_node[nid],
                                h->free_huge_pages_node[nid],
                                h->surplus_huge_pages_node[nid],
-                               1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
+                               huge_page_size(h) / SZ_1K);
 }
 
 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
@@ -3696,9 +3640,7 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 
 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
 {
-       struct hstate *hstate = hstate_vma(vma);
-
-       return 1UL << huge_page_shift(hstate);
+       return huge_page_size(hstate_vma(vma));
 }
 
 /*
@@ -4207,7 +4149,7 @@ retry_avoidcopy:
        spin_lock(ptl);
        ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
        if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
-               ClearPagePrivate(new_page);
+               ClearHPageRestoreReserve(new_page);
 
                /* Break COW */
                huge_ptep_clear_flush(vma, haddr, ptep);
@@ -4216,7 +4158,7 @@ retry_avoidcopy:
                                make_huge_pte(vma, new_page, 1));
                page_remove_rmap(old_page, true);
                hugepage_add_new_anon_rmap(new_page, vma, haddr);
-               set_page_huge_active(new_page);
+               SetHPageMigratable(new_page);
                /* Make the old page be freed below */
                new_page = old_page;
        }
@@ -4274,7 +4216,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 
        if (err)
                return err;
-       ClearPagePrivate(page);
+       ClearHPageRestoreReserve(page);
 
        /*
         * set page dirty so that it will not be removed from cache/file
@@ -4436,7 +4378,7 @@ retry:
                goto backout;
 
        if (anon_rmap) {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, vma, haddr);
        } else
                page_dup_rmap(page, true);
@@ -4453,12 +4395,12 @@ retry:
        spin_unlock(ptl);
 
        /*
-        * Only make newly allocated pages active.  Existing pages found
-        * in the pagecache could be !page_huge_active() if they have been
-        * isolated for migration.
+        * Only set HPageMigratable in newly allocated pages.  Existing pages
+        * found in the pagecache may not have HPageMigratable set if they have
+        * been isolated for migration.
         */
        if (new_page)
-               set_page_huge_active(page);
+               SetHPageMigratable(page);
 
        unlock_page(page);
 out:
@@ -4750,7 +4692,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (vm_shared) {
                page_dup_rmap(page, true);
        } else {
-               ClearPagePrivate(page);
+               ClearHPageRestoreReserve(page);
                hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
        }
 
@@ -4769,7 +4711,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
 
        spin_unlock(ptl);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        if (vm_shared)
                unlock_page(page);
        ret = 0;
@@ -5074,12 +5016,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        return pages << h->order;
 }
 
-int hugetlb_reserve_pages(struct inode *inode,
+/* Return true if reservation was successful, false otherwise.  */
+bool hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
                                        vm_flags_t vm_flags)
 {
-       long ret, chg, add = -1;
+       long chg, add = -1;
        struct hstate *h = hstate_inode(inode);
        struct hugepage_subpool *spool = subpool_inode(inode);
        struct resv_map *resv_map;
@@ -5089,7 +5032,7 @@ int hugetlb_reserve_pages(struct inode *inode,
        /* This should never happen */
        if (from > to) {
                VM_WARN(1, "%s called with a negative range\n", __func__);
-               return -EINVAL;
+               return false;
        }
 
        /*
@@ -5098,7 +5041,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * without using reserves
         */
        if (vm_flags & VM_NORESERVE)
-               return 0;
+               return true;
 
        /*
         * Shared mappings base their reservation on the number of pages that
@@ -5120,7 +5063,7 @@ int hugetlb_reserve_pages(struct inode *inode,
                /* Private mapping. */
                resv_map = resv_map_alloc();
                if (!resv_map)
-                       return -ENOMEM;
+                       return false;
 
                chg = to - from;
 
@@ -5128,18 +5071,12 @@ int hugetlb_reserve_pages(struct inode *inode,
                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
        }
 
-       if (chg < 0) {
-               ret = chg;
+       if (chg < 0)
                goto out_err;
-       }
 
-       ret = hugetlb_cgroup_charge_cgroup_rsvd(
-               hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
-
-       if (ret < 0) {
-               ret = -ENOMEM;
+       if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+                               chg * pages_per_huge_page(h), &h_cg) < 0)
                goto out_err;
-       }
 
        if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
                /* For private mappings, the hugetlb_cgroup uncharge info hangs
@@ -5154,19 +5091,15 @@ int hugetlb_reserve_pages(struct inode *inode,
         * reservations already in place (gbl_reserve).
         */
        gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-       if (gbl_reserve < 0) {
-               ret = -ENOSPC;
+       if (gbl_reserve < 0)
                goto out_uncharge_cgroup;
-       }
 
        /*
         * Check enough hugepages are available for the reservation.
         * Hand the pages back to the subpool if there are not
         */
-       ret = hugetlb_acct_memory(h, gbl_reserve);
-       if (ret < 0) {
+       if (hugetlb_acct_memory(h, gbl_reserve) < 0)
                goto out_put_pages;
-       }
 
        /*
         * Account for the reservations made. Shared mappings record regions
@@ -5184,7 +5117,6 @@ int hugetlb_reserve_pages(struct inode *inode,
 
                if (unlikely(add < 0)) {
                        hugetlb_acct_memory(h, -gbl_reserve);
-                       ret = add;
                        goto out_put_pages;
                } else if (unlikely(chg > add)) {
                        /*
@@ -5205,7 +5137,8 @@ int hugetlb_reserve_pages(struct inode *inode,
                        hugetlb_acct_memory(h, -rsv_adjust);
                }
        }
-       return 0;
+       return true;
+
 out_put_pages:
        /* put back original number of pages, chg */
        (void)hugepage_subpool_put_pages(spool, chg);
@@ -5221,7 +5154,7 @@ out_err:
                        region_abort(resv_map, from, to, regions_needed);
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                kref_put(&resv_map->refs, resv_map_release);
-       return ret;
+       return false;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
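With hugetlb_reserve_pages() now returning bool, callers only learn success or failure; the specific errno was never meaningfully propagated. A caller outside this file, e.g. hugetlbfs_file_mmap() in fs/hugetlbfs/inode.c, would be updated along these lines (illustrative sketch, not part of this diff):

	/* reserve pages for the mapping; any failure is reported as -ENOMEM */
	if (!hugetlb_reserve_pages(inode,
				   vma->vm_pgoff >> huge_page_order(h),
				   len >> huge_page_shift(h), vma,
				   vma->vm_flags))
		goto out;

	ret = 0;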
@@ -5608,12 +5541,13 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
        bool ret = true;
 
        spin_lock(&hugetlb_lock);
-       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+       if (!PageHeadHuge(page) ||
+           !HPageMigratable(page) ||
            !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
-       clear_page_huge_active(page);
+       ClearHPageMigratable(page);
        list_move_tail(&page->lru, list);
 unlock:
        spin_unlock(&hugetlb_lock);
@@ -5622,9 +5556,8 @@ unlock:
 
 void putback_active_hugepage(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       set_page_huge_active(page);
+       SetHPageMigratable(page);
        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
        spin_unlock(&hugetlb_lock);
        put_page(page);
@@ -5647,12 +5580,12 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
         * here as well otherwise the global surplus count will not match
         * the per-node's.
         */
-       if (PageHugeTemporary(newpage)) {
+       if (HPageTemporary(newpage)) {
                int old_nid = page_to_nid(oldpage);
                int new_nid = page_to_nid(newpage);
 
-               SetPageHugeTemporary(oldpage);
-               ClearPageHugeTemporary(newpage);
+               SetHPageTemporary(oldpage);
+               ClearHPageTemporary(newpage);
 
                spin_lock(&hugetlb_lock);
                if (h->surplus_huge_pages_node[old_nid]) {