diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4e4ef8fa479d53b7ee7c4c8fcb86985acb790c8a..f2d19e4fe854f3941bbc24dd1e1cc76d0622b7eb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -629,21 +629,30 @@ release:
  *         available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
 {
        const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+       const gfp_t gfp_mask = GFP_TRANSHUGE_LIGHT | __GFP_THISNODE;
 
+       /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+               return GFP_TRANSHUGE | __GFP_THISNODE |
+                      (vma_madvised ? 0 : __GFP_NORETRY);
+
+       /* Kick kcompactd and fail quickly */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+               return gfp_mask | __GFP_KSWAPD_RECLAIM;
+
+       /* Synchronous compaction if madvised, otherwise kick kcompactd */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            __GFP_KSWAPD_RECLAIM);
+               return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+                                                 __GFP_KSWAPD_RECLAIM);
+
+       /* Only do synchronous compaction if madvised */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            0);
-       return GFP_TRANSHUGE_LIGHT;
+               return gfp_mask | (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
+       return gfp_mask;
 }
 
 /* Caller must hold page table lock. */
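
Note on the vma_madvised test above: a mapping is "madvised" once user space has
called madvise(MADV_HUGEPAGE) on it, which sets VM_HUGEPAGE on the VMA. A minimal
user-space sketch (illustration only, not part of this patch; assumes the usual
2 MiB PMD size on x86-64) of how a region ends up taking the madvised paths of
alloc_hugepage_direct_gfpmask() at fault time:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;	/* 4 MiB: covers at least one aligned 2 MiB unit */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Sets VM_HUGEPAGE on the VMA, so vma_madvised is true for later faults. */
	if (madvise(p, len, MADV_HUGEPAGE))
		perror("madvise(MADV_HUGEPAGE)");

	memset(p, 1, len);	/* touch the range; faults may be served with THPs */
	munmap(p, len);
	return 0;
}
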
@@ -715,8 +724,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
                        pte_free(vma->vm_mm, pgtable);
                return ret;
        }
-       gfp = alloc_hugepage_direct_gfpmask(vma);
-       page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+       gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+       page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
        if (unlikely(!page)) {
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
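
The fallback branch above bumps THP_FAULT_FALLBACK, which is exported through
/proc/vmstat together with the success counter thp_fault_alloc. A small
user-space sketch (illustration only) that prints both, so the effect of a
failed huge page allocation can be observed before and after a test run:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "thp_fault_alloc", 15) ||
		    !strncmp(line, "thp_fault_fallback", 18))
			fputs(line, stdout);	/* e.g. "thp_fault_fallback 3" */
	}
	fclose(f);
	return 0;
}
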
@@ -1286,8 +1295,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 alloc:
        if (transparent_hugepage_enabled(vma) &&
            !transparent_hugepage_debug_cow()) {
-               huge_gfp = alloc_hugepage_direct_gfpmask(vma);
-               new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+               huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+               new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+                               haddr, numa_node_id());
        } else
                new_page = NULL;
 
@@ -2330,7 +2340,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
        }
 }
 
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
 {
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
                TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2345,7 +2355,7 @@ static void freeze_page(struct page *page)
        VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
 {
        int i;
        if (PageTransHuge(page)) {
@@ -2382,6 +2392,12 @@ static void __split_huge_page_tail(struct page *head, int tail,
                         (1L << PG_unevictable) |
                         (1L << PG_dirty)));
 
+       /* ->mapping in first tail page is compound_mapcount */
+       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+                       page_tail);
+       page_tail->mapping = head->mapping;
+       page_tail->index = head->index + tail;
+
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
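
Moving the ->mapping and ->index stores above the smp_wmb() ensures they are
published before the tail becomes visible as an individual, non-compound page.
A loose user-space analogue of that publish pattern (C11 release/acquire
atomics; illustration only, not the kernel's primitives -- build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct tail {
	void *mapping;
	unsigned long index;
};

static struct tail tail;
static atomic_int published;

static void *producer(void *arg)
{
	/* Fill in the fields first... */
	tail.mapping = arg;
	tail.index = 42;
	/* ...then publish: the release store keeps the field writes ahead of
	 * the flag, much as smp_wmb() keeps them ahead of the point where the
	 * tail page stops being part of the compound page. */
	atomic_store_explicit(&published, 1, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t th;

	pthread_create(&th, NULL, producer, &tail);
	/* The acquire load pairs with the release store: once the flag is
	 * observed, the earlier field writes are visible as well. */
	while (!atomic_load_explicit(&published, memory_order_acquire))
		;
	printf("index=%lu\n", tail.index);
	pthread_join(th, NULL);
	return 0;
}
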
 
@@ -2402,12 +2418,6 @@ static void __split_huge_page_tail(struct page *head, int tail,
        if (page_is_idle(head))
                set_page_idle(page_tail);
 
-       /* ->mapping in first tail page is compound_mapcount */
-       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
-                       page_tail);
-       page_tail->mapping = head->mapping;
-
-       page_tail->index = head->index + tail;
        page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
 
        /*
@@ -2419,12 +2429,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-               unsigned long flags)
+               pgoff_t end, unsigned long flags)
 {
        struct page *head = compound_head(page);
        struct zone *zone = page_zone(head);
        struct lruvec *lruvec;
-       pgoff_t end = -1;
        int i;
 
        lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2432,9 +2441,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
 
-       if (!PageAnon(page))
-               end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond i_size: drop them from page cache */
@@ -2463,7 +2469,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
-       unfreeze_page(head);
+       remap_page(head);
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                struct page *subpage = head + i;
@@ -2606,6 +2612,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        int count, mapcount, extra_pins, ret;
        bool mlocked;
        unsigned long flags;
+       pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2628,6 +2635,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        ret = -EBUSY;
                        goto out;
                }
+               end = -1;
                mapping = NULL;
                anon_vma_lock_write(anon_vma);
        } else {
@@ -2641,10 +2649,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
                anon_vma = NULL;
                i_mmap_lock_read(mapping);
+
+               /*
+                * __split_huge_page() may need to trim off pages beyond EOF:
+                * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+                * which cannot be nested inside the page tree lock. So note
+                * end now: i_size itself may be changed at any moment, but
+                * head page lock is good enough to serialize the trimming.
+                */
+               end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
        }
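
To make the end bookkeeping concrete: end is the first page index past EOF, so
every tail whose index is >= end gets dropped from the page cache by
__split_huge_page(). A self-contained sketch of just that arithmetic
(user-space copy of DIV_ROUND_UP; a 4 KiB page size is assumed for the example):

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096UL			/* assumption: 4 KiB pages */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long i_size = 5000;	/* file is 5000 bytes long */
	unsigned long end = DIV_ROUND_UP(i_size, EXAMPLE_PAGE_SIZE);	/* == 2 */
	unsigned long index;

	/* Indices 0 and 1 still intersect i_size; index >= 2 is beyond EOF
	 * and would be trimmed during the split. */
	for (index = 0; index < 4; index++)
		printf("index %lu: %s\n", index,
		       index >= end ? "beyond EOF, drop" : "keep");
	return 0;
}
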
 
        /*
-        * Racy check if we can split the page, before freeze_page() will
+        * Racy check if we can split the page, before unmap_page() will
         * split PMDs
         */
        if (!can_split_huge_page(head, &extra_pins)) {
@@ -2653,7 +2670,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        mlocked = PageMlocked(page);
-       freeze_page(head);
+       unmap_page(head);
        VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2687,7 +2704,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping)
                        __dec_node_page_state(page, NR_SHMEM_THPS);
                spin_unlock(&pgdata->split_queue_lock);
-               __split_huge_page(page, list, flags);
+               __split_huge_page(page, list, end, flags);
                if (PageSwapCache(head)) {
                        swp_entry_t entry = { .val = page_private(head) };
 
@@ -2707,7 +2724,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 fail:          if (mapping)
                        xa_unlock(&mapping->i_pages);
                spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
-               unfreeze_page(head);
+               remap_page(head);
                ret = -EBUSY;
        }
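
For testing, this split path can also be driven from user space through the THP
debugfs knob: writing to it makes the kernel walk eligible huge pages and split
them via split_huge_page_to_list(). A minimal sketch, assuming CONFIG_DEBUG_FS
with debugfs mounted at /sys/kernel/debug (root required):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/split_huge_pages", "w");

	if (!f) {
		perror("split_huge_pages");
		return 1;
	}
	/* Any write asks the kernel to split currently mapped huge pages. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}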