diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e42568284e06038ab70ec1344f63a2e5182ea90d..d7b2a4bf8671643e3345b7ac197570582ac14aef 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -965,9 +965,7 @@ static void collapse_huge_page(struct mm_struct *mm,
                goto out_nolock;
        }
 
-       /* Do not oom kill for khugepaged charges */
-       if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
-                                          &memcg, true))) {
+       if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out_nolock;
        }
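
Note on the hunk above and its twin in collapse_shmem() below: both stop OR-ing __GFP_NORETRY into the memcg charge, presumably because the charge path itself now declines to invoke the OOM killer for huge-page-sized charges, making the explicit flag redundant. For reference, a sketch of the prototype being called, from memory of kernels of this vintage (treat the exact signature as an assumption):

    /* Returns 0 on success or a negative errno when the charge fails;
     * @compound is true here because a PMD-sized page is being charged. */
    int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                              gfp_t gfp_mask, struct mem_cgroup **memcgp,
                              bool compound);
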
@@ -1326,9 +1324,7 @@ static void collapse_shmem(struct mm_struct *mm,
                goto out;
        }
 
-       /* Do not oom kill for khugepaged charges */
-       if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
-                                          &memcg, true))) {
+       if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
                result = SCAN_CGROUP_CHARGE_FAIL;
                goto out;
        }
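
The remaining hunks in this file mechanically replace mapping->tree_lock and mapping->page_tree with the lock and tree embedded in mapping->i_pages. The new helpers are thin wrappers over the spinlock that now lives in the tree root; a sketch of their definitions as added in include/linux/xarray.h by the same series (quoted from memory, so verify against the tree):

    /* The lock sits inside the structure that owns the tree, so the
     * lock and the data it protects can no longer drift apart. */
    #define xa_lock_irq(xa)    spin_lock_irq(&(xa)->xa_lock)
    #define xa_unlock_irq(xa)  spin_unlock_irq(&(xa)->xa_lock)
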
@@ -1348,8 +1344,8 @@ static void collapse_shmem(struct mm_struct *mm,
         */
 
        index = start;
-       spin_lock_irq(&mapping->tree_lock);
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+       xa_lock_irq(&mapping->i_pages);
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                int n = min(iter.index, end) - index;
 
                /*
@@ -1362,7 +1358,7 @@ static void collapse_shmem(struct mm_struct *mm,
                }
                nr_none += n;
                for (; index < min(iter.index, end); index++) {
-                       radix_tree_insert(&mapping->page_tree, index,
+                       radix_tree_insert(&mapping->i_pages, index,
                                        new_page + (index % HPAGE_PMD_NR));
                }
 
@@ -1371,16 +1367,16 @@ static void collapse_shmem(struct mm_struct *mm,
                        break;
 
                page = radix_tree_deref_slot_protected(slot,
-                               &mapping->tree_lock);
+                               &mapping->i_pages.xa_lock);
                if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
-                       spin_unlock_irq(&mapping->tree_lock);
+                       xa_unlock_irq(&mapping->i_pages);
                        /* swap in or instantiate fallocated page */
                        if (shmem_getpage(mapping->host, index, &page,
                                                SGP_NOHUGE)) {
                                result = SCAN_FAIL;
                                goto tree_unlocked;
                        }
-                       spin_lock_irq(&mapping->tree_lock);
+                       xa_lock_irq(&mapping->i_pages);
                } else if (trylock_page(page)) {
                        get_page(page);
                } else {
@@ -1389,7 +1385,7 @@ static void collapse_shmem(struct mm_struct *mm,
                }
 
                /*
-                * The page must be locked, so we can drop the tree_lock
+                * The page must be locked, so we can drop the i_pages lock
                 * without racing with truncate.
                 */
                VM_BUG_ON_PAGE(!PageLocked(page), page);
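
The comment above carries the ordering argument for this whole loop: the page lock, taken with trylock_page() while still under the tree lock, is what keeps truncation from completing once xa_lock is dropped. A condensed, purely illustrative sketch of the pattern (the label is hypothetical, not patch code):

    if (!trylock_page(page))
            goto out_busy;                 /* hypothetical slow path */
    get_page(page);
    xa_unlock_irq(&mapping->i_pages);
    /* sleepable work is safe here: isolate_lru_page(),
     * unmap_mapping_pages(), ... */
    xa_lock_irq(&mapping->i_pages);
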
@@ -1400,7 +1396,7 @@ static void collapse_shmem(struct mm_struct *mm,
                        result = SCAN_TRUNCATED;
                        goto out_unlock;
                }
-               spin_unlock_irq(&mapping->tree_lock);
+               xa_unlock_irq(&mapping->i_pages);
 
                if (isolate_lru_page(page)) {
                        result = SCAN_DEL_PAGE_LRU;
@@ -1410,11 +1406,11 @@ static void collapse_shmem(struct mm_struct *mm,
                if (page_mapped(page))
                        unmap_mapping_pages(mapping, index, 1, false);
 
-               spin_lock_irq(&mapping->tree_lock);
+               xa_lock_irq(&mapping->i_pages);
 
-               slot = radix_tree_lookup_slot(&mapping->page_tree, index);
+               slot = radix_tree_lookup_slot(&mapping->i_pages, index);
                VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
-                                       &mapping->tree_lock), page);
+                                       &mapping->i_pages.xa_lock), page);
                VM_BUG_ON_PAGE(page_mapped(page), page);
 
                /*
@@ -1435,14 +1431,14 @@ static void collapse_shmem(struct mm_struct *mm,
                list_add_tail(&page->lru, &pagelist);
 
                /* Finally, replace with the new page. */
-               radix_tree_replace_slot(&mapping->page_tree, slot,
+               radix_tree_replace_slot(&mapping->i_pages, slot,
                                new_page + (index % HPAGE_PMD_NR));
 
                slot = radix_tree_iter_resume(slot, &iter);
                index++;
                continue;
 out_lru:
-               spin_unlock_irq(&mapping->tree_lock);
+               xa_unlock_irq(&mapping->i_pages);
                putback_lru_page(page);
 out_isolate_failed:
                unlock_page(page);
@@ -1468,14 +1464,14 @@ out_unlock:
                }
 
                for (; index < end; index++) {
-                       radix_tree_insert(&mapping->page_tree, index,
+                       radix_tree_insert(&mapping->i_pages, index,
                                        new_page + (index % HPAGE_PMD_NR));
                }
                nr_none += n;
        }
 
 tree_locked:
-       spin_unlock_irq(&mapping->tree_lock);
+       xa_unlock_irq(&mapping->i_pages);
 tree_unlocked:
 
        if (result == SCAN_SUCCEED) {
@@ -1524,9 +1520,8 @@ tree_unlocked:
        } else {
                /* Something went wrong: rollback changes to the radix-tree */
                shmem_uncharge(mapping->host, nr_none);
-               spin_lock_irq(&mapping->tree_lock);
-               radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
-                               start) {
+               xa_lock_irq(&mapping->i_pages);
+               radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                        if (iter.index >= end)
                                break;
                        page = list_first_entry_or_null(&pagelist,
@@ -1536,8 +1531,7 @@ tree_unlocked:
                                        break;
                                nr_none--;
                                /* Put holes back where they were */
-                               radix_tree_delete(&mapping->page_tree,
-                                                 iter.index);
+                               radix_tree_delete(&mapping->i_pages, iter.index);
                                continue;
                        }
 
@@ -1546,16 +1540,15 @@ tree_unlocked:
                        /* Unfreeze the page. */
                        list_del(&page->lru);
                        page_ref_unfreeze(page, 2);
-                       radix_tree_replace_slot(&mapping->page_tree,
-                                               slot, page);
+                       radix_tree_replace_slot(&mapping->i_pages, slot, page);
                        slot = radix_tree_iter_resume(slot, &iter);
-                       spin_unlock_irq(&mapping->tree_lock);
+                       xa_unlock_irq(&mapping->i_pages);
                        putback_lru_page(page);
                        unlock_page(page);
-                       spin_lock_irq(&mapping->tree_lock);
+                       xa_lock_irq(&mapping->i_pages);
                }
                VM_BUG_ON(nr_none);
-               spin_unlock_irq(&mapping->tree_lock);
+               xa_unlock_irq(&mapping->i_pages);
 
                /* Unfreeze new_page, caller would take care about freeing it */
                page_ref_unfreeze(new_page, 1);
@@ -1583,7 +1576,7 @@ static void khugepaged_scan_shmem(struct mm_struct *mm,
        swap = 0;
        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        rcu_read_lock();
-       radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+       radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
                if (iter.index >= start + HPAGE_PMD_NR)
                        break;
 
@@ -1883,8 +1876,16 @@ static void set_recommended_min_free_kbytes(void)
        int nr_zones = 0;
        unsigned long recommended_min;
 
-       for_each_populated_zone(zone)
+       for_each_populated_zone(zone) {
+               /*
+                * We don't need to worry about fragmentation of
+                * ZONE_MOVABLE since it only has movable pages.
+                */
+               if (zone_idx(zone) > gfp_zone(GFP_USER))
+                       continue;
+
                nr_zones++;
+       }
 
        /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
        recommended_min = pageblock_nr_pages * nr_zones * 2;
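
For scale, a worked example of the new accounting (all numbers assumed for a typical x86-64 box; none of this is from the patch): with 4 KiB pages, pageblock_nr_pages is 512 (2 MiB pageblocks), and gfp_zone(GFP_USER) is ZONE_NORMAL, so only DMA, DMA32 and Normal are counted while ZONE_MOVABLE is skipped.

    unsigned long pageblocks = 512;     /* pages per 2 MiB pageblock */
    int counted_zones = 3;              /* DMA, DMA32, Normal */
    unsigned long min_pages = pageblocks * counted_zones * 2;
    /* 3072 pages == 12 MiB kept free; counting a populated
     * ZONE_MOVABLE as well would have raised this to 16 MiB. */
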