// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local() instead.
 */
int migrate_prep(void)
{
        /*
         * Clear the LRU lists so pages can be isolated.
         * Note that pages may be moved off the LRU after we have
         * drained them. Those pages will fail to migrate like other
         * pages that may be busy.
         */
        lru_add_drain_all();

        return 0;
}
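
/*
 * Illustrative sketch, not part of this file: a typical caller drains the
 * per-CPU LRU caches and then isolates pages onto a private list before
 * handing them to migrate_pages(). Declarations, error handling and the
 * NR_ISOLATED_* accounting are omitted; "source" and "new_page" are made-up
 * names for this example:
 *
 *	migrate_prep();
 *	list_for_each_entry_safe(page, next, &source, lru) {
 *		if (!isolate_lru_page(page))
 *			list_add_tail(&page->lru, &pagelist);
 *	}
 *	migrate_pages(&pagelist, new_page, NULL, 0, MIGRATE_SYNC, MR_SYSCALL);
 */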

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
        lru_add_drain();

        return 0;
}

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
        struct address_space *mapping;

        /*
         * Avoid burning cycles with pages that are still under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a movable page being freed under us and
         * raise its refcount preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of
         * releasing this page, thus avoiding a nasty leakage.
         */
        if (unlikely(!get_page_unless_zero(page)))
                goto out;

        /*
         * Check PageMovable before holding a PG_lock because the page's owner
         * assumes that nobody touches the PG_lock of a newly allocated page,
         * so unconditionally grabbing the lock ruins the page owner's side.
         */
        if (unlikely(!__PageMovable(page)))
                goto out_putpage;
        /*
         * As movable pages are not isolated from LRU lists, concurrent
         * compaction threads can race against page migration functions
         * as well as against the release of a page.
         *
         * In order to avoid having an already isolated movable page
         * being (wrongly) re-isolated while it is under migration,
         * or to avoid attempting to isolate pages being released,
         * let's be sure we have the page lock
         * before proceeding with the movable page isolation steps.
         */
        if (unlikely(!trylock_page(page)))
                goto out_putpage;

        if (!PageMovable(page) || PageIsolated(page))
                goto out_no_isolated;

        mapping = page_mapping(page);
        VM_BUG_ON_PAGE(!mapping, page);

        if (!mapping->a_ops->isolate_page(page, mode))
                goto out_no_isolated;

        /* Driver shouldn't use the PG_isolated bit of page->flags */
        WARN_ON_ONCE(PageIsolated(page));
        __SetPageIsolated(page);
        unlock_page(page);

        return 0;

out_no_isolated:
        unlock_page(page);
out_putpage:
        put_page(page);
out:
        return -EBUSY;
}
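
/*
 * Illustrative sketch, not part of this file: for a page to be taken by
 * isolate_movable_page(), its driver must publish isolate/putback/migrate
 * hooks through an address_space and mark the page with __SetPageMovable()
 * under the page lock, in the spirit of the balloon driver. The "example_"
 * names below are made up:
 *
 *	static const struct address_space_operations example_aops = {
 *		.isolate_page	= example_isolate_page,
 *		.migratepage	= example_migratepage,
 *		.putback_page	= example_putback_page,
 *	};
 *
 *	lock_page(page);
 *	__SetPageMovable(page, example_mapping);
 *	unlock_page(page);
 */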

/* It should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
        struct address_space *mapping;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageMovable(page), page);
        VM_BUG_ON_PAGE(!PageIsolated(page), page);

        mapping = page_mapping(page);
        mapping->a_ops->putback_page(page);
        __ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
        struct page *page;
        struct page *page2;

        list_for_each_entry_safe(page, page2, l, lru) {
                if (unlikely(PageHuge(page))) {
                        putback_active_hugepage(page);
                        continue;
                }
                list_del(&page->lru);
                /*
                 * We isolated a non-LRU movable page, so here we can use
                 * __PageMovable because an LRU page's mapping cannot have
                 * PAGE_MAPPING_MOVABLE.
                 */
                if (unlikely(__PageMovable(page))) {
                        VM_BUG_ON_PAGE(!PageIsolated(page), page);
                        lock_page(page);
                        if (PageMovable(page))
                                putback_movable_page(page);
                        else
                                __ClearPageIsolated(page);
                        unlock_page(page);
                        put_page(page);
                } else {
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                                        page_is_file_cache(page), -hpage_nr_pages(page));
                        putback_lru_page(page);
                }
        }
}
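
/*
 * Usage sketch, not part of this file: callers pair this with
 * migrate_pages() to restore anything left on the list when migration
 * fails ("new_page" is a made-up allocation callback):
 *
 *	err = migrate_pages(&pagelist, new_page, NULL, 0,
 *			MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */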

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                 unsigned long addr, void *old)
{
        struct page_vma_mapped_walk pvmw = {
                .page = old,
                .vma = vma,
                .address = addr,
                .flags = PVMW_SYNC | PVMW_MIGRATION,
        };
        struct page *new;
        pte_t pte;
        swp_entry_t entry;

        VM_BUG_ON_PAGE(PageTail(page), page);
        while (page_vma_mapped_walk(&pvmw)) {
                if (PageKsm(page))
                        new = page;
                else
                        new = page - pvmw.page->index +
                                linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
                /* PMD-mapped THP migration entry */
                if (!pvmw.pte) {
                        VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
                        remove_migration_pmd(&pvmw, new);
                        continue;
                }
#endif

                get_page(new);
                pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
                if (pte_swp_soft_dirty(*pvmw.pte))
                        pte = pte_mksoft_dirty(pte);

                /*
                 * Recheck VMA as permissions can change since migration started
                 */
                entry = pte_to_swp_entry(*pvmw.pte);
                if (is_write_migration_entry(entry))
                        pte = maybe_mkwrite(pte, vma);

                if (unlikely(is_zone_device_page(new))) {
                        if (is_device_private_page(new)) {
                                entry = make_device_private_entry(new, pte_write(pte));
                                pte = swp_entry_to_pte(entry);
                        } else if (is_device_public_page(new)) {
                                pte = pte_mkdevmap(pte);
                                flush_dcache_page(new);
                        }
                } else
                        flush_dcache_page(new);

#ifdef CONFIG_HUGETLB_PAGE
                if (PageHuge(new)) {
                        pte = pte_mkhuge(pte);
                        pte = arch_make_huge_pte(pte, vma, new, 0);
                        set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
                        if (PageAnon(new))
                                hugepage_add_anon_rmap(new, vma, pvmw.address);
                        else
                                page_dup_rmap(new, true);
                } else
#endif
                {
                        set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

                        if (PageAnon(new))
                                page_add_anon_rmap(new, vma, pvmw.address, false);
                        else
                                page_add_file_rmap(new, false);
                }
                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                        mlock_vma_page(new);

                if (PageTransHuge(page) && PageMlocked(page))
                        clear_page_mlock(page);

                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }

        return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
        struct rmap_walk_control rwc = {
                .rmap_one = remove_migration_pte,
                .arg = old,
        };

        if (locked)
                rmap_walk_locked(new, &rwc);
        else
                rmap_walk(new, &rwc);
}
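
/*
 * Note: __unmap_and_move() below calls this with (page, newpage) to commit
 * a successful migration, and with (page, page) to roll the migration
 * entries back into normal ptes when migration fails, so the same rmap walk
 * serves both commit and rollback.
 */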

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                spinlock_t *ptl)
{
        pte_t pte;
        swp_entry_t entry;
        struct page *page;

        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
                goto out;

        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry))
                goto out;

        page = migration_entry_to_page(entry);

        /*
         * Once page cache replacement of page migration started, page_count
         * is zero; but we must not call put_and_wait_on_page_locked() without
         * a ref. Use get_page_unless_zero(), and just fault again if it fails.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
        put_and_wait_on_page_locked(page);
        return;
out:
        pte_unmap_unlock(ptep, ptl);
}
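
/*
 * Illustrative sketch, not part of this file: the fault path reaches
 * __migration_entry_wait() roughly like this (simplified from
 * do_swap_page() in mm/memory.c):
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 */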

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long address)
{
        spinlock_t *ptl = pte_lockptr(mm, pmd);
        pte_t *ptep = pte_offset_map(pmd, address);
        __migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
                struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
        __migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl;
        struct page *page;

        ptl = pmd_lock(mm, pmd);
        if (!is_pmd_migration_entry(*pmd))
                goto unlock;
        page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
        if (!get_page_unless_zero(page))
                goto unlock;
        spin_unlock(ptl);
        put_and_wait_on_page_locked(page);
        return;
unlock:
        spin_unlock(ptl);
}
#endif

static int expected_page_refs(struct page *page)
{
        int expected_count = 1;

        /*
         * Device public or private pages have an extra refcount as they are
         * ZONE_DEVICE pages.
         */
        expected_count += is_device_private_page(page);
        expected_count += is_device_public_page(page);
        if (page_mapping(page))
                expected_count += hpage_nr_pages(page) + page_has_private(page);

        return expected_count;
}
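
/*
 * Worked example (illustrative): for an unmapped page-cache THP of
 * HPAGE_PMD_NR == 512 base pages with PagePrivate set, this returns
 * 1 (caller's ref) + 512 (page cache) + 1 (private) = 514; for an
 * anonymous page with no mapping it returns just 1.
 */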

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode,
                int extra_count)
{
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = expected_page_refs(page) + extra_count;

        if (!mapping) {
                /* Anonymous page without mapping */
                if (page_count(page) != expected_count)
                        return -EAGAIN;

                /* No turning back from here */
                newpage->index = page->index;
                newpage->mapping = page->mapping;
                if (PageSwapBacked(page))
                        __SetPageSwapBacked(newpage);

                return MIGRATEPAGE_SUCCESS;
        }

        oldzone = page_zone(page);
        newzone = page_zone(newpage);

        xas_lock_irq(&xas);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        /*
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
        page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
                        SetPageSwapCache(newpage);
                        set_page_private(newpage, page_private(page));
                }
        } else {
                VM_BUG_ON_PAGE(PageSwapCache(page), page);
        }

        /* Move dirty while page refs frozen and newpage not yet exposed */
        dirty = PageDirty(page);
        if (dirty) {
                ClearPageDirty(page);
                SetPageDirty(newpage);
        }

        xas_store(&xas, newpage);
        if (PageTransHuge(page)) {
                int i;

                for (i = 1; i < HPAGE_PMD_NR; i++) {
                        xas_next(&xas);
                        xas_store(&xas, newpage + i);
                }
        }

        /*
         * Drop cache reference from old page by unfreezing
         * to one less reference.
         * We know this isn't the last reference.
         */
        page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */

        /*
         * If moved to a different zone then also account
         * the page for that zone. Other VM counters will be
         * taken care of when we establish references to the
         * new page and drop references to the old page.
         *
         * Note that anonymous pages are accounted for
         * via NR_FILE_PAGES and NR_ANON_MAPPED if they
         * are mapped to swap space.
         */
        if (newzone != oldzone) {
                __dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
                __inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
                if (PageSwapBacked(page) && !PageSwapCache(page)) {
                        __dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
                        __inc_node_state(newzone->zone_pgdat, NR_SHMEM);
                }
                if (dirty && mapping_cap_account_dirty(mapping)) {
                        __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
                        __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
                        __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
                        __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
                }
        }
        local_irq_enable();

        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
{
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        int expected_count;

        xas_lock_irq(&xas);
        expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        if (!page_ref_freeze(page, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        newpage->index = page->index;
        newpage->mapping = page->mapping;

        get_page(newpage);

        xas_store(&xas, newpage);

        page_ref_unfreeze(page, expected_count - 1);

        xas_unlock_irq(&xas);

        return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
                                int nr_pages)
{
        int i;
        struct page *dst_base = dst;
        struct page *src_base = src;

        for (i = 0; i < nr_pages; ) {
                cond_resched();
                copy_highpage(dst, src);

                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
}

static void copy_huge_page(struct page *dst, struct page *src)
{
        int i;
        int nr_pages;

        if (PageHuge(src)) {
                /* hugetlbfs page */
                struct hstate *h = page_hstate(src);
                nr_pages = pages_per_huge_page(h);

                if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
                        __copy_gigantic_page(dst, src, nr_pages);
                        return;
                }
        } else {
                /* thp page */
                BUG_ON(!PageTransHuge(src));
                nr_pages = hpage_nr_pages(src);
        }

        for (i = 0; i < nr_pages; i++) {
                cond_resched();
                copy_highpage(dst + i, src + i);
        }
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
        int cpupid;

        if (PageError(page))
                SetPageError(newpage);
        if (PageReferenced(page))
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
        if (TestClearPageActive(page)) {
                VM_BUG_ON_PAGE(PageUnevictable(page), page);
                SetPageActive(newpage);
        } else if (TestClearPageUnevictable(page))
                SetPageUnevictable(newpage);
        if (PageWorkingset(page))
                SetPageWorkingset(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
                SetPageMappedToDisk(newpage);

        /* Move the dirty flag for pages not handled by migrate_page_move_mapping() */
        if (PageDirty(page))
                SetPageDirty(newpage);

        if (page_is_young(page))
                set_page_young(newpage);
        if (page_is_idle(page))
                set_page_idle(newpage);

        /*
         * Copy NUMA information to the new page, to prevent over-eager
         * future migrations of this same page.
         */
        cpupid = page_cpupid_xchg_last(page, -1);
        page_cpupid_xchg_last(newpage, cpupid);

        ksm_migrate_page(newpage, page);
        /*
         * Please do not reorder this without considering how mm/ksm.c's
         * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
         */
        if (PageSwapCache(page))
                ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);

        /*
         * If any waiters have accumulated on the new page then
         * wake them up.
         */
        if (PageWriteback(newpage))
                end_page_writeback(newpage);

        copy_page_owner(page, newpage);

        mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
        if (PageHuge(page) || PageTransHuge(page))
                copy_huge_page(newpage, page);
        else
                copy_highpage(newpage, page);

        migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page,
                enum migrate_mode mode)
{
        int rc;

        BUG_ON(PageWriteback(page));    /* Writeback must be complete */

        rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);

        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
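
/*
 * Usage sketch, not part of this file: mappings whose pages carry no
 * fs-private state can point their address_space_operations straight at
 * migrate_page(), as the swap address space does for instance
 * ("example_aops" is a made-up name):
 *
 *	static const struct address_space_operations example_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */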

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
                                                        enum migrate_mode mode)
{
        struct buffer_head *bh = head;

        /* Simple case, sync compaction */
        if (mode != MIGRATE_ASYNC) {
                do {
                        get_bh(bh);
                        lock_buffer(bh);
                        bh = bh->b_this_page;

                } while (bh != head);

                return true;
        }

        /* async case, we cannot block on lock_buffer so use trylock_buffer */
        do {
                get_bh(bh);
                if (!trylock_buffer(bh)) {
                        /*
                         * We failed to lock the buffer and cannot stall in
                         * async migration. Release the taken locks.
                         */
                        struct buffer_head *failed_bh = bh;
                        put_bh(failed_bh);
                        bh = head;
                        while (bh != failed_bh) {
                                unlock_buffer(bh);
                                put_bh(bh);
                                bh = bh->b_this_page;
                        }
                        return false;
                }

                bh = bh->b_this_page;
        } while (bh != head);
        return true;
}

static int __buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode,
                bool check_refs)
{
        struct buffer_head *bh, *head;
        int rc;
        int expected_count;

        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page, mode);

        /* Check whether page does not have extra refs before we do more work */
        expected_count = expected_page_refs(page);
        if (page_count(page) != expected_count)
                return -EAGAIN;

        head = page_buffers(page);
        if (!buffer_migrate_lock_buffers(head, mode))
                return -EAGAIN;

        if (check_refs) {
                bool busy;
                bool invalidated = false;

recheck_buffers:
                busy = false;
                spin_lock(&mapping->private_lock);
                bh = head;
                do {
                        if (atomic_read(&bh->b_count)) {
                                busy = true;
                                break;
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
                spin_unlock(&mapping->private_lock);
                if (busy) {
                        if (invalidated) {
                                rc = -EAGAIN;
                                goto unlock_buffers;
                        }
                        invalidate_bh_lrus();
                        invalidated = true;
                        goto recheck_buffers;
                }
        }

        rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                goto unlock_buffers;

        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
        set_page_private(page, 0);
        put_page(page);
        get_page(newpage);

        bh = head;
        do {
                set_bh_page(bh, newpage, bh_offset(bh));
                bh = bh->b_this_page;

        } while (bh != head);

        SetPagePrivate(newpage);

        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);

        rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
        bh = head;
        do {
                unlock_buffer(bh);
                put_bh(bh);
                bh = bh->b_this_page;

        } while (bh != head);

        return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);
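
/*
 * Usage sketch, not part of this file: buffer-head based filesystems
 * typically wire this up in their address_space_operations (the "example_"
 * names are made up):
 *
 *	static const struct address_space_operations example_fs_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 */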

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
                struct page *newpage, struct page *page, enum migrate_mode mode)
{
        return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = 1,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1
        };
        int rc;

        if (!mapping->a_ops->writepage)
                /* No write method for the address space */
                return -EINVAL;

        if (!clear_page_dirty_for_io(page))
                /* Someone else already triggered a write */
                return -EAGAIN;

        /*
         * A dirty page may imply that the underlying filesystem has
         * the page on some queue. So the page must be clean for
         * migration. Writeout may mean we lose the lock and the
         * page state is no longer what we checked for earlier.
         * At this point we know that the migration attempt cannot
         * be successful.
         */
        remove_migration_ptes(page, page, false);

        rc = mapping->a_ops->writepage(page, &wbc);

        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);

        return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
        struct page *newpage, struct page *page, enum migrate_mode mode)
{
        if (PageDirty(page)) {
                /* Only writeback pages in full synchronous migration */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        return -EBUSY;
                }
                return writeout(mapping, page);
        }

        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;

        return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        struct address_space *mapping;
        int rc = -EAGAIN;
        bool is_lru = !__PageMovable(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

        mapping = page_mapping(page);

        if (likely(is_lru)) {
                if (!mapping)
                        rc = migrate_page(mapping, newpage, page, mode);
                else if (mapping->a_ops->migratepage)
                        /*
                         * Most pages have a mapping and most filesystems
                         * provide a migratepage callback. Anonymous pages
                         * are part of swap space which also has its own
                         * migratepage callback. This is the most common path
                         * for page migration.
                         */
                        rc = mapping->a_ops->migratepage(mapping, newpage,
                                                        page, mode);
                else
                        rc = fallback_migrate_page(mapping, newpage,
                                                        page, mode);
        } else {
                /*
                 * In case of a non-LRU page, it could be released after the
                 * isolation step. In that case, we shouldn't try migration.
                 */
                VM_BUG_ON_PAGE(!PageIsolated(page), page);
                if (!PageMovable(page)) {
                        rc = MIGRATEPAGE_SUCCESS;
                        __ClearPageIsolated(page);
                        goto out;
                }

                rc = mapping->a_ops->migratepage(mapping, newpage,
                                                page, mode);
                WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
                        !PageIsolated(page));
        }

        /*
         * When successful, old pagecache page->mapping must be cleared before
         * page is freed; but stats require that PageAnon be left as PageAnon.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (__PageMovable(page)) {
                        VM_BUG_ON_PAGE(!PageIsolated(page), page);

                        /*
                         * We clear PG_movable under page_lock so any compactor
                         * cannot try to migrate this page.
                         */
                        __ClearPageIsolated(page);
                }

                /*
                 * Anonymous and movable page->mapping will be cleared by
                 * free_pages_prepare, so don't reset it here; keeping it lets
                 * type checks such as PageAnon keep working.
                 */
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
        }
out:
        return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
                                int force, enum migrate_mode mode)
{
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct anon_vma *anon_vma = NULL;
        bool is_lru = !__PageMovable(page);

        if (!trylock_page(page)) {
                if (!force || mode == MIGRATE_ASYNC)
                        goto out;

                /*
                 * It's not safe for direct compaction to call lock_page.
                 * For example, during page readahead pages are added locked
                 * to the LRU. Later, when the IO completes the pages are
                 * marked uptodate and unlocked. However, the queueing
                 * could be merging multiple pages for one bio (e.g.
                 * mpage_readpages). If an allocation happens for the
                 * second or third page, the process can end up locking
                 * the same page twice and deadlocking. Rather than
                 * trying to be clever about what pages can be locked,
                 * avoid the use of lock_page for direct compaction
                 * altogether.
                 */
                if (current->flags & PF_MEMALLOC)
                        goto out;

                lock_page(page);
        }

        if (PageWriteback(page)) {
                /*
                 * Only in the case of a full synchronous migration is it
                 * necessary to wait for PageWriteback. In the async case,
                 * the retry loop is too short and in the sync-light case,
                 * the overhead of stalling is too much.
                 */
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        rc = -EBUSY;
                        goto out_unlock;
                }
                if (!force)
                        goto out_unlock;
                wait_on_page_writeback(page);
        }

        /*
         * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
         * we cannot notice that anon_vma is freed while we migrate a page.
         * This get_anon_vma() delays freeing the anon_vma pointer until the
         * end of migration. File cache pages are no problem because of
         * page_lock(). File caches may use write_page() or lock_page() in
         * migration, so we only need to care about anon pages here.
         *
         * Only page_get_anon_vma() understands the subtleties of
         * getting a hold on an anon_vma from outside one of its mms.
         * But if we cannot get anon_vma, then we won't need it anyway,
         * because that implies that the anon page is no longer mapped
         * (and cannot be remapped so long as we hold the page lock).
         */
        if (PageAnon(page) && !PageKsm(page))
                anon_vma = page_get_anon_vma(page);

        /*
         * Block others from accessing the new page when we get around to
         * establishing additional references. We are usually the only one
         * holding a reference to newpage at this point. We used to have a BUG
         * here if trylock_page(newpage) fails, but would like to allow for
         * cases where there might be a race with the previous use of newpage.
         * This is much like races on refcount of oldpage: just don't BUG().
         */
        if (unlikely(!trylock_page(newpage)))
                goto out_unlock;

        if (unlikely(!is_lru)) {
                rc = move_to_new_page(newpage, page, mode);
                goto out_unlock_both;
        }

        /*
         * Corner case handling:
         * 1. When a new swap-cache page is read in, it is added to the LRU
         * and treated as swapcache but it has no rmap yet.
         * Calling try_to_unmap() against a page->mapping==NULL page will
         * trigger a BUG.  So handle it here.
         * 2. An orphaned page (see truncate_complete_page) might have
         * fs-private metadata. The page can be picked up due to memory
         * offlining.  Everywhere else except page reclaim, the page is
         * invisible to the vm, so the page can not be migrated.  So try to
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
                VM_BUG_ON_PAGE(PageAnon(page), page);
                if (page_has_private(page)) {
                        try_to_free_buffers(page);
                        goto out_unlock_both;
                }
        } else if (page_mapped(page)) {
                /* Establish migration ptes */
                VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
                                page);
                try_to_unmap(page,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }

        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page, mode);

        if (page_was_mapped)
                remove_migration_ptes(page,
                        rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
        unlock_page(newpage);
out_unlock:
        /* Drop an anon_vma reference if we took one */
        if (anon_vma)
                put_anon_vma(anon_vma);
        unlock_page(page);
out:
        /*
         * If migration is successful, decrease the refcount of the newpage,
         * which will not free the page because the new page owner increased
         * the refcount. As well, if it is an LRU page, add the page to the
         * LRU list here.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                if (unlikely(__PageMovable(newpage)))
                        put_page(newpage);
                else
                        putback_lru_page(newpage);
        }

        return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move().  Work
 * around it.
 */
#if defined(CONFIG_ARM) && \
        defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
                                   free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
                                   enum migrate_reason reason)
{
        int rc = MIGRATEPAGE_SUCCESS;
        struct page *newpage;

        if (!thp_migration_supported() && PageTransHuge(page))
                return -ENOMEM;

        newpage = get_new_page(page, private);
        if (!newpage)
                return -ENOMEM;

        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                ClearPageActive(page);
                ClearPageUnevictable(page);
                if (unlikely(__PageMovable(page))) {
                        lock_page(page);
                        if (!PageMovable(page))
                                __ClearPageIsolated(page);
                        unlock_page(page);
                }
                if (put_new_page)
                        put_new_page(newpage, private);
                else
                        put_page(newpage);
                goto out;
        }

        rc = __unmap_and_move(page, newpage, force, mode);
        if (rc == MIGRATEPAGE_SUCCESS)
                set_page_owner_migrate_reason(newpage, reason);

out:
        if (rc != -EAGAIN) {
                /*
                 * A page that has been migrated has all references
                 * removed and will be freed. A page that has not been
                 * migrated will have kept its references and be
                 * restored.
                 */
                list_del(&page->lru);

                /*
                 * Compaction can migrate also non-LRU pages which are
                 * not accounted to NR_ISOLATED_*. They can be recognized
                 * as __PageMovable.
                 */
                if (likely(!__PageMovable(page)))
                        mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
                                        page_is_file_cache(page), -hpage_nr_pages(page));
        }

        /*
         * If migration is successful, release the reference grabbed during
         * isolation. Otherwise, restore the page to the right list unless
         * we want to retry.
         */
        if (rc == MIGRATEPAGE_SUCCESS) {
                put_page(page);
                if (reason == MR_MEMORY_FAILURE) {
                        /*
                         * Set PG_HWPoison on the just freed page
                         * intentionally. Although it's rather weird,
                         * it's how the HWPoison flag works at the moment.
                         */
                        if (set_hwpoison_free_buddy_page(page))
                                num_poisoned_pages_inc();
                }
        } else {
                if (rc != -EAGAIN) {
                        if (likely(!__PageMovable(page))) {
                                putback_lru_page(page);
                                goto put_new;
                        }

                        lock_page(page);
                        if (PageMovable(page))
                                putback_movable_page(page);
                        else
                                __ClearPageIsolated(page);
                        unlock_page(page);
                        put_page(page);
                }
put_new:
                if (put_new_page)
                        put_new_page(newpage, private);
                else
                        put_page(newpage);
        }

        return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
                                free_page_t put_new_page, unsigned long private,
                                struct page *hpage, int force,
                                enum migrate_mode mode, int reason)
{
        int rc = -EAGAIN;
        int page_was_mapped = 0;
        struct page *new_hpage;
        struct anon_vma *anon_vma = NULL;

        /*
         * Movability of hugepages depends on architectures and hugepage size.
         * This check is necessary because some callers of hugepage migration
         * like soft offline and memory hotremove don't walk through page
         * tables or check whether the hugepage is pmd-based or not before
         * kicking migration.
         */
        if (!hugepage_migration_supported(page_hstate(hpage))) {
                putback_active_hugepage(hpage);
                return -ENOSYS;
        }

        new_hpage = get_new_page(hpage, private);
        if (!new_hpage)
                return -ENOMEM;

        if (!trylock_page(hpage)) {
                if (!force)
                        goto out;
                switch (mode) {
                case MIGRATE_SYNC:
                case MIGRATE_SYNC_NO_COPY:
                        break;
                default:
                        goto out;
                }
                lock_page(hpage);
        }

        if (PageAnon(hpage))
                anon_vma = page_get_anon_vma(hpage);

        if (unlikely(!trylock_page(new_hpage)))
                goto put_anon;

        if (page_mapped(hpage)) {
                try_to_unmap(hpage,
                        TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
                page_was_mapped = 1;
        }

        if (!page_mapped(hpage))
                rc = move_to_new_page(new_hpage, hpage, mode);

        if (page_was_mapped)
                remove_migration_ptes(hpage,
                        rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

        unlock_page(new_hpage);

put_anon:
        if (anon_vma)
                put_anon_vma(anon_vma);

        if (rc == MIGRATEPAGE_SUCCESS) {
                move_hugetlb_state(hpage, new_hpage, reason);
                put_new_page = NULL;
        }

        unlock_page(hpage);
out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);

        /*
         * If migration was not successful and there's a freeing callback, use
         * it.  Otherwise, put_page() will drop the reference grabbed during
         * isolation.
         */
        if (put_new_page)
                put_new_page(new_hpage, private);
        else
                putback_active_hugepage(new_hpage);

        return rc;
}
1367
1368 /*
1369  * migrate_pages - migrate the pages specified in a list, to the free pages
1370  *                 supplied as the target for the page migration
1371  *
1372  * @from:               The list of pages to be migrated.
1373  * @get_new_page:       The function used to allocate free pages to be used
1374  *                      as the target of the page migration.
1375  * @put_new_page:       The function used to free target pages if migration
1376  *                      fails, or NULL if no special handling is necessary.
1377  * @private:            Private data to be passed on to get_new_page()
1378  * @mode:               The migration mode that specifies the constraints for
1379  *                      page migration, if any.
1380  * @reason:             The reason for page migration.
1381  *
1382  * The function returns after 10 attempts or if no pages are movable any more
1383  * because the list has become empty or no retryable pages exist any more.
1384  * The caller should call putback_movable_pages() to return pages to the LRU
1385  * or free list only if ret != 0.
1386  *
1387  * Returns the number of pages that were not migrated, or an error code.
1388  */
1389 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1390                 free_page_t put_new_page, unsigned long private,
1391                 enum migrate_mode mode, int reason)
1392 {
1393         int retry = 1;
1394         int nr_failed = 0;
1395         int nr_succeeded = 0;
1396         int pass = 0;
1397         struct page *page;
1398         struct page *page2;
1399         int swapwrite = current->flags & PF_SWAPWRITE;
1400         int rc;
1401
1402         if (!swapwrite)
1403                 current->flags |= PF_SWAPWRITE;
1404
1405         for(pass = 0; pass < 10 && retry; pass++) {
1406                 retry = 0;
1407
1408                 list_for_each_entry_safe(page, page2, from, lru) {
1409 retry:
1410                         cond_resched();
1411
1412                         if (PageHuge(page))
1413                                 rc = unmap_and_move_huge_page(get_new_page,
1414                                                 put_new_page, private, page,
1415                                                 pass > 2, mode, reason);
1416                         else
1417                                 rc = unmap_and_move(get_new_page, put_new_page,
1418                                                 private, page, pass > 2, mode,
1419                                                 reason);
1420
1421                         switch(rc) {
1422                         case -ENOMEM:
1423                                 /*
1424                                  * THP migration might be unsupported or the
1425                                  * allocation could've failed so we should
1426                                  * retry on the same page with the THP split
1427                                  * to base pages.
1428                                  *
1429                                  * Head page is retried immediately and tail
1430                                  * pages are added to the tail of the list so
1431                                  * we encounter them after the rest of the list
1432                                  * is processed.
1433                                  */
1434                                 if (PageTransHuge(page) && !PageHuge(page)) {
1435                                         lock_page(page);
1436                                         rc = split_huge_page_to_list(page, from);
1437                                         unlock_page(page);
1438                                         if (!rc) {
1439                                                 list_safe_reset_next(page, page2, lru);
1440                                                 goto retry;
1441                                         }
1442                                 }
1443                                 nr_failed++;
1444                                 goto out;
1445                         case -EAGAIN:
1446                                 retry++;
1447                                 break;
1448                         case MIGRATEPAGE_SUCCESS:
1449                                 nr_succeeded++;
1450                                 break;
1451                         default:
1452                                 /*
1453                                  * Permanent failure (-EBUSY, -ENOSYS, etc.):
1454                                  * unlike the -EAGAIN case, the failed page is
1455                                  * removed from the migration page list and is
1456                                  * not retried in the next outer loop.
1457                                  */
1458                                 nr_failed++;
1459                                 break;
1460                         }
1461                 }
1462         }
1463         nr_failed += retry;
1464         rc = nr_failed;
1465 out:
1466         if (nr_succeeded)
1467                 count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1468         if (nr_failed)
1469                 count_vm_events(PGMIGRATE_FAIL, nr_failed);
1470         trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
1471
1472         if (!swapwrite)
1473                 current->flags &= ~PF_SWAPWRITE;
1474
1475         return rc;
1476 }
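/*
 * Example (editor's sketch, not part of the original file): a minimal
 * migrate_pages() caller in the style of do_move_pages_to_node() below.
 * "demo_alloc" and "nid" are hypothetical names; the callback follows
 * the new_page_t typedef from <linux/migrate.h>.
 *
 *	static struct page *demo_alloc(struct page *page, unsigned long private)
 *	{
 *		return alloc_pages_node((int)private, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 *
 *	(pagelist holds pages previously isolated with isolate_lru_page())
 *	err = migrate_pages(&pagelist, demo_alloc, NULL, nid,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */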
1477
1478 #ifdef CONFIG_NUMA
1479
1480 static int store_status(int __user *status, int start, int value, int nr)
1481 {
1482         while (nr-- > 0) {
1483                 if (put_user(value, status + start))
1484                         return -EFAULT;
1485                 start++;
1486         }
1487
1488         return 0;
1489 }
1490
1491 static int do_move_pages_to_node(struct mm_struct *mm,
1492                 struct list_head *pagelist, int node)
1493 {
1494         int err;
1495
1496         if (list_empty(pagelist))
1497                 return 0;
1498
1499         err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
1500                         MIGRATE_SYNC, MR_SYSCALL);
1501         if (err)
1502                 putback_movable_pages(pagelist);
1503         return err;
1504 }
1505
1506 /*
1507  * Resolves the given address to a struct page, isolates it from the LRU and
1508  * adds it to the given pagelist.
1509  * Returns -errno if the page cannot be found/isolated, or 0 when it has been
1510  * queued or when the page doesn't need to be migrated because it is already
1511  * on the target node.
1512  */
1513 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1514                 int node, struct list_head *pagelist, bool migrate_all)
1515 {
1516         struct vm_area_struct *vma;
1517         struct page *page;
1518         unsigned int follflags;
1519         int err;
1520
1521         down_read(&mm->mmap_sem);
1522         err = -EFAULT;
1523         vma = find_vma(mm, addr);
1524         if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1525                 goto out;
1526
1527         /* FOLL_DUMP to ignore special (like zero) pages */
1528         follflags = FOLL_GET | FOLL_DUMP;
1529         page = follow_page(vma, addr, follflags);
1530
1531         err = PTR_ERR(page);
1532         if (IS_ERR(page))
1533                 goto out;
1534
1535         err = -ENOENT;
1536         if (!page)
1537                 goto out;
1538
1539         err = 0;
1540         if (page_to_nid(page) == node)
1541                 goto out_putpage;
1542
1543         err = -EACCES;
1544         if (page_mapcount(page) > 1 && !migrate_all)
1545                 goto out_putpage;
1546
1547         if (PageHuge(page)) {
1548                 if (PageHead(page)) {
1549                         isolate_huge_page(page, pagelist);
1550                         err = 0;
1551                 }
1552         } else {
1553                 struct page *head;
1554
1555                 head = compound_head(page);
1556                 err = isolate_lru_page(head);
1557                 if (err)
1558                         goto out_putpage;
1559
1560                 err = 0;
1561                 list_add_tail(&head->lru, pagelist);
1562                 mod_node_page_state(page_pgdat(head),
1563                         NR_ISOLATED_ANON + page_is_file_cache(head),
1564                         hpage_nr_pages(head));
1565         }
1566 out_putpage:
1567         /*
1568          * Either drop the extra refcount taken by
1569          * isolate_lru_page() or drop the page ref if it was
1570          * not isolated.
1571          */
1572         put_page(page);
1573 out:
1574         up_read(&mm->mmap_sem);
1575         return err;
1576 }
1577
1578 /*
1579  * Migrate an array of page addresses onto an array of nodes and fill
1580  * in the corresponding array of status values.
1581  */
1582 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1583                          unsigned long nr_pages,
1584                          const void __user * __user *pages,
1585                          const int __user *nodes,
1586                          int __user *status, int flags)
1587 {
1588         int current_node = NUMA_NO_NODE;
1589         LIST_HEAD(pagelist);
1590         int start, i;
1591         int err = 0, err1;
1592
1593         migrate_prep();
1594
1595         for (i = start = 0; i < nr_pages; i++) {
1596                 const void __user *p;
1597                 unsigned long addr;
1598                 int node;
1599
1600                 err = -EFAULT;
1601                 if (get_user(p, pages + i))
1602                         goto out_flush;
1603                 if (get_user(node, nodes + i))
1604                         goto out_flush;
1605                 addr = (unsigned long)p;
1606
1607                 err = -ENODEV;
1608                 if (node < 0 || node >= MAX_NUMNODES)
1609                         goto out_flush;
1610                 if (!node_state(node, N_MEMORY))
1611                         goto out_flush;
1612
1613                 err = -EACCES;
1614                 if (!node_isset(node, task_nodes))
1615                         goto out_flush;
1616
1617                 if (current_node == NUMA_NO_NODE) {
1618                         current_node = node;
1619                         start = i;
1620                 } else if (node != current_node) {
1621                         err = do_move_pages_to_node(mm, &pagelist, current_node);
1622                         if (err)
1623                                 goto out;
1624                         err = store_status(status, start, current_node, i - start);
1625                         if (err)
1626                                 goto out;
1627                         start = i;
1628                         current_node = node;
1629                 }
1630
1631                 /*
1632                  * Errors in the page lookup or isolation are not fatal and we simply
1633                  * report them via status.
1634                  */
1635                 err = add_page_for_migration(mm, addr, current_node,
1636                                 &pagelist, flags & MPOL_MF_MOVE_ALL);
1637                 if (!err)
1638                         continue;
1639
1640                 err = store_status(status, i, err, 1);
1641                 if (err)
1642                         goto out_flush;
1643
1644                 err = do_move_pages_to_node(mm, &pagelist, current_node);
1645                 if (err)
1646                         goto out;
1647                 if (i > start) {
1648                         err = store_status(status, start, current_node, i - start);
1649                         if (err)
1650                                 goto out;
1651                 }
1652                 current_node = NUMA_NO_NODE;
1653         }
1654 out_flush:
1655         if (list_empty(&pagelist))
1656                 return err;
1657
1658         /* Make sure we do not overwrite the existing error */
1659         err1 = do_move_pages_to_node(mm, &pagelist, current_node);
1660         if (!err1)
1661                 err1 = store_status(status, start, current_node, i - start);
1662         if (!err)
1663                 err = err1;
1664 out:
1665         return err;
1666 }
1667
1668 /*
1669  * Determine the nodes of an array of pages and store them in an array of status.
1670  */
1671 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1672                                 const void __user **pages, int *status)
1673 {
1674         unsigned long i;
1675
1676         down_read(&mm->mmap_sem);
1677
1678         for (i = 0; i < nr_pages; i++) {
1679                 unsigned long addr = (unsigned long)(*pages);
1680                 struct vm_area_struct *vma;
1681                 struct page *page;
1682                 int err = -EFAULT;
1683
1684                 vma = find_vma(mm, addr);
1685                 if (!vma || addr < vma->vm_start)
1686                         goto set_status;
1687
1688                 /* FOLL_DUMP to ignore special (like zero) pages */
1689                 page = follow_page(vma, addr, FOLL_DUMP);
1690
1691                 err = PTR_ERR(page);
1692                 if (IS_ERR(page))
1693                         goto set_status;
1694
1695                 err = page ? page_to_nid(page) : -ENOENT;
1696 set_status:
1697                 *status = err;
1698
1699                 pages++;
1700                 status++;
1701         }
1702
1703         up_read(&mm->mmap_sem);
1704 }
1705
1706 /*
1707  * Determine the nodes of a user array of pages and store them in
1708  * a user array of status.
1709  */
1710 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1711                          const void __user * __user *pages,
1712                          int __user *status)
1713 {
1714 #define DO_PAGES_STAT_CHUNK_NR 16
1715         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1716         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1717
1718         while (nr_pages) {
1719                 unsigned long chunk_nr;
1720
1721                 chunk_nr = nr_pages;
1722                 if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1723                         chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1724
1725                 if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1726                         break;
1727
1728                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1729
1730                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1731                         break;
1732
1733                 pages += chunk_nr;
1734                 status += chunk_nr;
1735                 nr_pages -= chunk_nr;
1736         }
1737         return nr_pages ? -EFAULT : 0;
1738 }
1739
1740 /*
1741  * Move a list of pages in the address space of the currently executing
1742  * process.
1743  */
1744 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
1745                              const void __user * __user *pages,
1746                              const int __user *nodes,
1747                              int __user *status, int flags)
1748 {
1749         struct task_struct *task;
1750         struct mm_struct *mm;
1751         int err;
1752         nodemask_t task_nodes;
1753
1754         /* Check flags */
1755         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1756                 return -EINVAL;
1757
1758         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1759                 return -EPERM;
1760
1761         /* Find the mm_struct */
1762         rcu_read_lock();
1763         task = pid ? find_task_by_vpid(pid) : current;
1764         if (!task) {
1765                 rcu_read_unlock();
1766                 return -ESRCH;
1767         }
1768         get_task_struct(task);
1769
1770         /*
1771          * Check if this process has the right to modify the specified
1772          * process. Use the regular "ptrace_may_access()" checks.
1773          */
1774         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1775                 rcu_read_unlock();
1776                 err = -EPERM;
1777                 goto out;
1778         }
1779         rcu_read_unlock();
1780
1781         err = security_task_movememory(task);
1782         if (err)
1783                 goto out;
1784
1785         task_nodes = cpuset_mems_allowed(task);
1786         mm = get_task_mm(task);
1787         put_task_struct(task);
1788
1789         if (!mm)
1790                 return -EINVAL;
1791
1792         if (nodes)
1793                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
1794                                     nodes, status, flags);
1795         else
1796                 err = do_pages_stat(mm, nr_pages, pages, status);
1797
1798         mmput(mm);
1799         return err;
1800
1801 out:
1802         put_task_struct(task);
1803         return err;
1804 }
1805
1806 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1807                 const void __user * __user *, pages,
1808                 const int __user *, nodes,
1809                 int __user *, status, int, flags)
1810 {
1811         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1812 }
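/*
 * Example (editor's sketch, hypothetical userspace code): exercising the
 * move_pages(2) syscall wired up above via the libnuma wrapper. Passing
 * nodes == NULL only queries placement; error handling is elided.
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { buf };	(page-aligned address in this process)
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	(report the page's current node in status[0], no migration)
 *	move_pages(0, 1, pages, NULL, status, 0);
 *
 *	(migrate to node 1; status[0] becomes the new node or a -errno)
 *	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 */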
1813
1814 #ifdef CONFIG_COMPAT
1815 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
1816                        compat_uptr_t __user *, pages32,
1817                        const int __user *, nodes,
1818                        int __user *, status,
1819                        int, flags)
1820 {
1821         const void __user * __user *pages;
1822         int i;
1823
1824         pages = compat_alloc_user_space(nr_pages * sizeof(void *));
1825         for (i = 0; i < nr_pages; i++) {
1826                 compat_uptr_t p;
1827
1828                 if (get_user(p, pages32 + i) ||
1829                         put_user(compat_ptr(p), pages + i))
1830                         return -EFAULT;
1831         }
1832         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
1833 }
1834 #endif /* CONFIG_COMPAT */
1835
1836 #ifdef CONFIG_NUMA_BALANCING
1837 /*
1838  * Returns true if this is a safe migration target node for misplaced NUMA
1839  * pages. Currently it only checks the watermarks, which is crude.
1840  */
1841 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
1842                                    unsigned long nr_migrate_pages)
1843 {
1844         int z;
1845
1846         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1847                 struct zone *zone = pgdat->node_zones + z;
1848
1849                 if (!populated_zone(zone))
1850                         continue;
1851
1852                 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
1853                 if (!zone_watermark_ok(zone, 0,
1854                                        high_wmark_pages(zone) +
1855                                        nr_migrate_pages,
1856                                        0, 0))
1857                         continue;
1858                 return true;
1859         }
1860         return false;
1861 }
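/*
 * Worked example (editor's note): for a zone whose high watermark is 1024
 * pages, migrating one 2MB THP (512 base pages) passes the check above only
 * if the zone has more than 1024 + 512 free pages, so the migration itself
 * cannot push the zone below its high watermark.
 */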
1862
1863 static struct page *alloc_misplaced_dst_page(struct page *page,
1864                                            unsigned long data)
1865 {
1866         int nid = (int) data;
1867         struct page *newpage;
1868
1869         newpage = __alloc_pages_node(nid,
1870                                          (GFP_HIGHUSER_MOVABLE |
1871                                           __GFP_THISNODE | __GFP_NOMEMALLOC |
1872                                           __GFP_NORETRY | __GFP_NOWARN) &
1873                                          ~__GFP_RECLAIM, 0);
1874
1875         return newpage;
1876 }
1877
1878 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
1879 {
1880         int page_lru;
1881
1882         VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
1883
1884         /* Avoid migrating to a node that is nearly full */
1885         if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
1886                 return 0;
1887
1888         if (isolate_lru_page(page))
1889                 return 0;
1890
1891         /*
1892          * migrate_misplaced_transhuge_page() skips page migration's usual
1893          * check on page_count(), so we must do it here, now that the page
1894          * has been isolated: a GUP pin, or any other pin, prevents migration.
1895          * The expected page count is 3: 1 for the page's mapcount, 1 for the
1896          * caller's pin, and 1 for the reference taken by isolate_lru_page().
1897          */
1898         if (PageTransHuge(page) && page_count(page) != 3) {
1899                 putback_lru_page(page);
1900                 return 0;
1901         }
1902
1903         page_lru = page_is_file_cache(page);
1904         mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
1905                                 hpage_nr_pages(page));
1906
1907         /*
1908          * Isolating the page has taken another reference, so the
1909          * caller's reference can be safely dropped without the page
1910          * disappearing underneath us during migration.
1911          */
1912         put_page(page);
1913         return 1;
1914 }
1915
1916 bool pmd_trans_migrating(pmd_t pmd)
1917 {
1918         struct page *page = pmd_page(pmd);
1919         return PageLocked(page);
1920 }
1921
1922 /*
1923  * Attempt to migrate a misplaced page to the specified destination
1924  * node. The caller is expected to hold an elevated reference count on
1925  * the page; that reference is dropped by this function before returning.
1926  */
1927 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
1928                            int node)
1929 {
1930         pg_data_t *pgdat = NODE_DATA(node);
1931         int isolated;
1932         int nr_remaining;
1933         LIST_HEAD(migratepages);
1934
1935         /*
1936          * Don't migrate file pages that are mapped in multiple processes
1937          * with execute permissions as they are probably shared libraries.
1938          */
1939         if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
1940             (vma->vm_flags & VM_EXEC))
1941                 goto out;
1942
1943         /*
1944          * Also do not migrate dirty pages as not all filesystems can move
1945          * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
1946          */
1947         if (page_is_file_cache(page) && PageDirty(page))
1948                 goto out;
1949
1950         isolated = numamigrate_isolate_page(pgdat, page);
1951         if (!isolated)
1952                 goto out;
1953
1954         list_add(&page->lru, &migratepages);
1955         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1956                                      NULL, node, MIGRATE_ASYNC,
1957                                      MR_NUMA_MISPLACED);
1958         if (nr_remaining) {
1959                 if (!list_empty(&migratepages)) {
1960                         list_del(&page->lru);
1961                         dec_node_page_state(page, NR_ISOLATED_ANON +
1962                                         page_is_file_cache(page));
1963                         putback_lru_page(page);
1964                 }
1965                 isolated = 0;
1966         } else
1967                 count_vm_numa_event(NUMA_PAGE_MIGRATE);
1968         BUG_ON(!list_empty(&migratepages));
1969         return isolated;
1970
1971 out:
1972         put_page(page);
1973         return 0;
1974 }
1975 #endif /* CONFIG_NUMA_BALANCING */
1976
1977 #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1978 /*
1979  * Migrates a THP to a given target node. page must be locked and is unlocked
1980  * before returning.
1981  */
1982 int migrate_misplaced_transhuge_page(struct mm_struct *mm,
1983                                 struct vm_area_struct *vma,
1984                                 pmd_t *pmd, pmd_t entry,
1985                                 unsigned long address,
1986                                 struct page *page, int node)
1987 {
1988         spinlock_t *ptl;
1989         pg_data_t *pgdat = NODE_DATA(node);
1990         int isolated = 0;
1991         struct page *new_page = NULL;
1992         int page_lru = page_is_file_cache(page);
1993         unsigned long start = address & HPAGE_PMD_MASK;
1994
1995         new_page = alloc_pages_node(node,
1996                 (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
1997                 HPAGE_PMD_ORDER);
1998         if (!new_page)
1999                 goto out_fail;
2000         prep_transhuge_page(new_page);
2001
2002         isolated = numamigrate_isolate_page(pgdat, page);
2003         if (!isolated) {
2004                 put_page(new_page);
2005                 goto out_fail;
2006         }
2007
2008         /* Prepare a page as a migration target */
2009         __SetPageLocked(new_page);
2010         if (PageSwapBacked(page))
2011                 __SetPageSwapBacked(new_page);
2012
2013         /* anon mapping, we can simply copy page->mapping to the new page: */
2014         new_page->mapping = page->mapping;
2015         new_page->index = page->index;
2016         /* flush the cache before copying using the kernel virtual address */
2017         flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
2018         migrate_page_copy(new_page, page);
2019         WARN_ON(PageLRU(new_page));
2020
2021         /* Recheck the target PMD */
2022         ptl = pmd_lock(mm, pmd);
2023         if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
2024                 spin_unlock(ptl);
2025
2026                 /* Reverse changes made by migrate_page_copy() */
2027                 if (TestClearPageActive(new_page))
2028                         SetPageActive(page);
2029                 if (TestClearPageUnevictable(new_page))
2030                         SetPageUnevictable(page);
2031
2032                 unlock_page(new_page);
2033                 put_page(new_page);             /* Free it */
2034
2035                 /* Retake the caller's reference and put back on the LRU */
2036                 get_page(page);
2037                 putback_lru_page(page);
2038                 mod_node_page_state(page_pgdat(page),
2039                          NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
2040
2041                 goto out_unlock;
2042         }
2043
2044         entry = mk_huge_pmd(new_page, vma->vm_page_prot);
2045         entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
2046
2047         /*
2048          * Overwrite the old entry under pagetable lock and establish
2049          * the new PMD. Any parallel GUP will either observe the old
2050          * page blocking on the page lock, block on the page table
2051          * lock or observe the new page. The SetPageUptodate on the
2052          * new page and page_add_new_anon_rmap guarantee the copy is
2053          * visible before the pagetable update.
2054          */
2055         page_add_anon_rmap(new_page, vma, start, true);
2056         /*
2057          * At this point the pmd is numa/protnone (i.e. non present) and the TLB
2058          * has already been flushed globally.  So no TLB can be currently
2059          * caching this non present pmd mapping.  There's no need to clear the
2060          * pmd before doing set_pmd_at(), nor to flush the TLB after
2061          * set_pmd_at().  Clearing the pmd here would introduce a race
2062          * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
2063          * mmap_sem for reading.  If the pmd is set to NULL at any given time,
2064          * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
2065          * pmd.
2066          */
2067         set_pmd_at(mm, start, pmd, entry);
2068         update_mmu_cache_pmd(vma, address, &entry);
2069
2070         page_ref_unfreeze(page, 2);
2071         mlock_migrate_page(new_page, page);
2072         page_remove_rmap(page, true);
2073         set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
2074
2075         spin_unlock(ptl);
2076
2077         /* Take an "isolate" reference and put new page on the LRU. */
2078         get_page(new_page);
2079         putback_lru_page(new_page);
2080
2081         unlock_page(new_page);
2082         unlock_page(page);
2083         put_page(page);                 /* Drop the rmap reference */
2084         put_page(page);                 /* Drop the LRU isolation reference */
2085
2086         count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
2087         count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
2088
2089         mod_node_page_state(page_pgdat(page),
2090                         NR_ISOLATED_ANON + page_lru,
2091                         -HPAGE_PMD_NR);
2092         return isolated;
2093
2094 out_fail:
2095         count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
2096         ptl = pmd_lock(mm, pmd);
2097         if (pmd_same(*pmd, entry)) {
2098                 entry = pmd_modify(entry, vma->vm_page_prot);
2099                 set_pmd_at(mm, start, pmd, entry);
2100                 update_mmu_cache_pmd(vma, address, &entry);
2101         }
2102         spin_unlock(ptl);
2103
2104 out_unlock:
2105         unlock_page(page);
2106         put_page(page);
2107         return 0;
2108 }
2109 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
2110
2111 #endif /* CONFIG_NUMA */
2112
2113 #if defined(CONFIG_MIGRATE_VMA_HELPER)
2114 struct migrate_vma {
2115         struct vm_area_struct   *vma;
2116         unsigned long           *dst;
2117         unsigned long           *src;
2118         unsigned long           cpages;
2119         unsigned long           npages;
2120         unsigned long           start;
2121         unsigned long           end;
2122 };
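/*
 * Editor's note: each src/dst entry is an unsigned long that encodes a pfn
 * plus MIGRATE_PFN_* flag bits in the low bits. A sketch of the encoding
 * helpers, roughly as defined in include/linux/migrate.h at this point in
 * the tree:
 *
 *	static inline unsigned long migrate_pfn(unsigned long pfn)
 *	{
 *		return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
 *	}
 *
 *	static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
 *	{
 *		if (!(mpfn & MIGRATE_PFN_VALID))
 *			return NULL;
 *		return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
 *	}
 */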
2123
2124 static int migrate_vma_collect_hole(unsigned long start,
2125                                     unsigned long end,
2126                                     struct mm_walk *walk)
2127 {
2128         struct migrate_vma *migrate = walk->private;
2129         unsigned long addr;
2130
2131         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2132                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2133                 migrate->dst[migrate->npages] = 0;
2134                 migrate->npages++;
2135                 migrate->cpages++;
2136         }
2137
2138         return 0;
2139 }
2140
2141 static int migrate_vma_collect_skip(unsigned long start,
2142                                     unsigned long end,
2143                                     struct mm_walk *walk)
2144 {
2145         struct migrate_vma *migrate = walk->private;
2146         unsigned long addr;
2147
2148         for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
2149                 migrate->dst[migrate->npages] = 0;
2150                 migrate->src[migrate->npages++] = 0;
2151         }
2152
2153         return 0;
2154 }
2155
2156 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2157                                    unsigned long start,
2158                                    unsigned long end,
2159                                    struct mm_walk *walk)
2160 {
2161         struct migrate_vma *migrate = walk->private;
2162         struct vm_area_struct *vma = walk->vma;
2163         struct mm_struct *mm = vma->vm_mm;
2164         unsigned long addr = start, unmapped = 0;
2165         spinlock_t *ptl;
2166         pte_t *ptep;
2167
2168 again:
2169         if (pmd_none(*pmdp))
2170                 return migrate_vma_collect_hole(start, end, walk);
2171
2172         if (pmd_trans_huge(*pmdp)) {
2173                 struct page *page;
2174
2175                 ptl = pmd_lock(mm, pmdp);
2176                 if (unlikely(!pmd_trans_huge(*pmdp))) {
2177                         spin_unlock(ptl);
2178                         goto again;
2179                 }
2180
2181                 page = pmd_page(*pmdp);
2182                 if (is_huge_zero_page(page)) {
2183                         spin_unlock(ptl);
2184                         split_huge_pmd(vma, pmdp, addr);
2185                         if (pmd_trans_unstable(pmdp))
2186                                 return migrate_vma_collect_skip(start, end,
2187                                                                 walk);
2188                 } else {
2189                         int ret;
2190
2191                         get_page(page);
2192                         spin_unlock(ptl);
2193                         if (unlikely(!trylock_page(page)))
2194                                 return migrate_vma_collect_skip(start, end,
2195                                                                 walk);
2196                         ret = split_huge_page(page);
2197                         unlock_page(page);
2198                         put_page(page);
2199                         if (ret)
2200                                 return migrate_vma_collect_skip(start, end,
2201                                                                 walk);
2202                         if (pmd_none(*pmdp))
2203                                 return migrate_vma_collect_hole(start, end,
2204                                                                 walk);
2205                 }
2206         }
2207
2208         if (unlikely(pmd_bad(*pmdp)))
2209                 return migrate_vma_collect_skip(start, end, walk);
2210
2211         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2212         arch_enter_lazy_mmu_mode();
2213
2214         for (; addr < end; addr += PAGE_SIZE, ptep++) {
2215                 unsigned long mpfn, pfn;
2216                 struct page *page;
2217                 swp_entry_t entry;
2218                 pte_t pte;
2219
2220                 pte = *ptep;
2221                 pfn = pte_pfn(pte);
2222
2223                 if (pte_none(pte)) {
2224                         mpfn = MIGRATE_PFN_MIGRATE;
2225                         migrate->cpages++;
2226                         pfn = 0;
2227                         goto next;
2228                 }
2229
2230                 if (!pte_present(pte)) {
2231                         mpfn = pfn = 0;
2232
2233                         /*
2234                          * Only care about unaddressable device page special
2235                          * page table entries. Other special swap entries are
2236                          * not migratable, and we ignore regular swapped pages.
2237                          */
2238                         entry = pte_to_swp_entry(pte);
2239                         if (!is_device_private_entry(entry))
2240                                 goto next;
2241
2242                         page = device_private_entry_to_page(entry);
2243                         mpfn = migrate_pfn(page_to_pfn(page)) |
2244                                 MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
2245                         if (is_write_device_private_entry(entry))
2246                                 mpfn |= MIGRATE_PFN_WRITE;
2247                 } else {
2248                         if (is_zero_pfn(pfn)) {
2249                                 mpfn = MIGRATE_PFN_MIGRATE;
2250                                 migrate->cpages++;
2251                                 pfn = 0;
2252                                 goto next;
2253                         }
2254                         page = _vm_normal_page(migrate->vma, addr, pte, true);
2255                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2256                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2257                 }
2258
2259                 /* FIXME support THP */
2260                 if (!page || !page->mapping || PageTransCompound(page)) {
2261                         mpfn = pfn = 0;
2262                         goto next;
2263                 }
2264                 pfn = page_to_pfn(page);
2265
2266                 /*
2267                  * By getting a reference on the page we pin it and that blocks
2268                  * any kind of migration. A side effect is that it "freezes"
2269                  * the pte.
2270                  *
2271                  * We drop this reference after isolating the page from the lru
2272                  * for non-device pages (device pages are not on the lru and
2273                  * thus the reference can't be dropped from there).
2274                  */
2275                 get_page(page);
2276                 migrate->cpages++;
2277
2278                 /*
2279                  * Optimize for the common case where page is only mapped once
2280                  * in one process. If we can lock the page, then we can safely
2281                  * set up a special migration page table entry now.
2282                  */
2283                 if (trylock_page(page)) {
2284                         pte_t swp_pte;
2285
2286                         mpfn |= MIGRATE_PFN_LOCKED;
2287                         ptep_get_and_clear(mm, addr, ptep);
2288
2289                         /* Setup special migration page table entry */
2290                         entry = make_migration_entry(page, mpfn &
2291                                                      MIGRATE_PFN_WRITE);
2292                         swp_pte = swp_entry_to_pte(entry);
2293                         if (pte_soft_dirty(pte))
2294                                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2295                         set_pte_at(mm, addr, ptep, swp_pte);
2296
2297                         /*
2298                          * This is like regular unmap: we remove the rmap and
2299                          * drop page refcount. Page won't be freed, as we took
2300                          * a reference just above.
2301                          */
2302                         page_remove_rmap(page, false);
2303                         put_page(page);
2304
2305                         if (pte_present(pte))
2306                                 unmapped++;
2307                 }
2308
2309 next:
2310                 migrate->dst[migrate->npages] = 0;
2311                 migrate->src[migrate->npages++] = mpfn;
2312         }
2313         arch_leave_lazy_mmu_mode();
2314         pte_unmap_unlock(ptep - 1, ptl);
2315
2316         /* Only flush the TLB if we actually modified any entries */
2317         if (unmapped)
2318                 flush_tlb_range(walk->vma, start, end);
2319
2320         return 0;
2321 }
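/*
 * Editor's note: the special migration entry installed above encodes the
 * page's pfn plus a read/write migration type in a swap-format pte, along
 * the lines of the helper in <linux/swapops.h>:
 *
 *	static inline swp_entry_t make_migration_entry(struct page *page,
 *						       int write)
 *	{
 *		BUG_ON(!PageLocked(compound_head(page)));
 *		return swp_entry(write ? SWP_MIGRATION_WRITE :
 *				 SWP_MIGRATION_READ, page_to_pfn(page));
 *	}
 *
 * A later fault on such a pte waits in migration_entry_wait() until the
 * entry is replaced, either on restore or in migrate_vma_finalize().
 */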
2322
2323 /*
2324  * migrate_vma_collect() - collect pages over a range of virtual addresses
2325  * @migrate: migrate struct containing all migration information
2326  *
2327  * This will walk the CPU page table. For each virtual address backed by a
2328  * valid page, it updates the src array and takes a reference on the page, in
2329  * order to pin the page until we lock it and unmap it.
2330  */
2331 static void migrate_vma_collect(struct migrate_vma *migrate)
2332 {
2333         struct mmu_notifier_range range;
2334         struct mm_walk mm_walk;
2335
2336         mm_walk.pmd_entry = migrate_vma_collect_pmd;
2337         mm_walk.pte_entry = NULL;
2338         mm_walk.pte_hole = migrate_vma_collect_hole;
2339         mm_walk.hugetlb_entry = NULL;
2340         mm_walk.test_walk = NULL;
2341         mm_walk.vma = migrate->vma;
2342         mm_walk.mm = migrate->vma->vm_mm;
2343         mm_walk.private = migrate;
2344
2345         mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
2346                                 migrate->end);
2347         mmu_notifier_invalidate_range_start(&range);
2348         walk_page_range(migrate->start, migrate->end, &mm_walk);
2349         mmu_notifier_invalidate_range_end(&range);
2350
2351         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2352 }
2353
2354 /*
2355  * migrate_vma_check_page() - check if page is pinned or not
2356  * @page: struct page to check
2357  *
2358  * Pinned pages cannot be migrated. This is the same test as in
2359  * migrate_page_move_mapping(), except that here we allow migration of a
2360  * ZONE_DEVICE page.
2361  */
2362 static bool migrate_vma_check_page(struct page *page)
2363 {
2364         /*
2365          * One extra ref because caller holds an extra reference, either from
2366          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2367          * a device page.
2368          */
2369         int extra = 1;
2370
2371         /*
2372          * FIXME support THP (transparent huge page), it is bit more complex to
2373          * check them than regular pages, because they can be mapped with a pmd
2374          * or with a pte (split pte mapping).
2375          */
2376         if (PageCompound(page))
2377                 return false;
2378
2379         /* Pages from ZONE_DEVICE have one extra reference */
2380         if (is_zone_device_page(page)) {
2381                 /*
2382                  * Private pages can never be pinned as they have no valid
2383                  * pte and GUP will fail for them. Yet if there is a pending
2384                  * migration, a thread might try to wait on the pte migration
2385                  * entry and bump the page reference count. Sadly there is no
2386                  * way to differentiate a regular pin from a migration wait,
2387                  * so to keep two racing threads from looping forever trying
2388                  * to migrate back to the CPU (one stopping migration because
2389                  * the other is waiting on the entry), we always return true.
2390                  *
2391                  * FIXME proper solution is to rework migration_entry_wait() so
2392                  * it does not need to take a reference on page.
2393                  */
2394                 if (is_device_private_page(page))
2395                         return true;
2396
2397                 /*
2398                  * Only allow device public pages to be migrated and account
2399                  * for the extra reference count implied by ZONE_DEVICE pages.
2400                  */
2401                 if (!is_device_public_page(page))
2402                         return false;
2403                 extra++;
2404         }
2405
2406         /* For file-backed pages */
2407         if (page_mapping(page))
2408                 extra += 1 + page_has_private(page);
2409
2410         if ((page_count(page) - extra) > page_mapcount(page))
2411                 return false;
2412
2413         return true;
2414 }
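/*
 * Worked example (editor's note): an anonymous page mapped in exactly one
 * process reaches this check with page_mapcount() == 1 and, counting the
 * mapping reference plus the reference the caller holds, page_count() == 2.
 * With extra == 1, (2 - 1) is not greater than 1, so the page is deemed
 * unpinned. A concurrent GUP raises page_count() to 3, and (3 - 1) > 1
 * flags the page as pinned and thus non-migratable.
 */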
2415
2416 /*
2417  * migrate_vma_prepare() - lock pages and isolate them from the lru
2418  * @migrate: migrate struct containing all migration information
2419  *
2420  * This locks pages that have been collected by migrate_vma_collect(). Once each
2421  * page is locked it is isolated from the lru (for non-device pages). Finally,
2422  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2423  * migrated by concurrent kernel threads.
2424  */
2425 static void migrate_vma_prepare(struct migrate_vma *migrate)
2426 {
2427         const unsigned long npages = migrate->npages;
2428         const unsigned long start = migrate->start;
2429         unsigned long addr, i, restore = 0;
2430         bool allow_drain = true;
2431
2432         lru_add_drain();
2433
2434         for (i = 0; (i < npages) && migrate->cpages; i++) {
2435                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2436                 bool remap = true;
2437
2438                 if (!page)
2439                         continue;
2440
2441                 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2442                         /*
2443                          * Because we are migrating several pages there can be
2444                          * a deadlock between two concurrent migrations where
2445                          * each is waiting on the other's page lock.
2446                          *
2447                          * Make migrate_vma() a best-effort thing and back off
2448                          * for any page we cannot lock right away.
2449                          */
2450                         if (!trylock_page(page)) {
2451                                 migrate->src[i] = 0;
2452                                 migrate->cpages--;
2453                                 put_page(page);
2454                                 continue;
2455                         }
2456                         remap = false;
2457                         migrate->src[i] |= MIGRATE_PFN_LOCKED;
2458                 }
2459
2460                 /* ZONE_DEVICE pages are not on LRU */
2461                 if (!is_zone_device_page(page)) {
2462                         if (!PageLRU(page) && allow_drain) {
2463                                 /* Drain CPU's pagevec */
2464                                 lru_add_drain_all();
2465                                 allow_drain = false;
2466                         }
2467
2468                         if (isolate_lru_page(page)) {
2469                                 if (remap) {
2470                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2471                                         migrate->cpages--;
2472                                         restore++;
2473                                 } else {
2474                                         migrate->src[i] = 0;
2475                                         unlock_page(page);
2476                                         migrate->cpages--;
2477                                         put_page(page);
2478                                 }
2479                                 continue;
2480                         }
2481
2482                         /* Drop the reference we took in collect */
2483                         put_page(page);
2484                 }
2485
2486                 if (!migrate_vma_check_page(page)) {
2487                         if (remap) {
2488                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2489                                 migrate->cpages--;
2490                                 restore++;
2491
2492                                 if (!is_zone_device_page(page)) {
2493                                         get_page(page);
2494                                         putback_lru_page(page);
2495                                 }
2496                         } else {
2497                                 migrate->src[i] = 0;
2498                                 unlock_page(page);
2499                                 migrate->cpages--;
2500
2501                                 if (!is_zone_device_page(page))
2502                                         putback_lru_page(page);
2503                                 else
2504                                         put_page(page);
2505                         }
2506                 }
2507         }
2508
2509         for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2510                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2511
2512                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2513                         continue;
2514
2515                 remove_migration_pte(page, migrate->vma, addr, page);
2516
2517                 migrate->src[i] = 0;
2518                 unlock_page(page);
2519                 put_page(page);
2520                 restore--;
2521         }
2522 }
2523
2524 /*
2525  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2526  * @migrate: migrate struct containing all migration information
2527  *
2528  * Replace page mapping (CPU page table pte) with a special migration pte entry
2529  * and check again if it has been pinned. Pinned pages are restored because we
2530  * cannot migrate them.
2531  *
2532  * This is the last step before we call the device driver callback to allocate
2533  * destination memory and copy contents of original page over to new page.
2534  */
2535 static void migrate_vma_unmap(struct migrate_vma *migrate)
2536 {
2537         int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
2538         const unsigned long npages = migrate->npages;
2539         const unsigned long start = migrate->start;
2540         unsigned long addr, i, restore = 0;
2541
2542         for (i = 0; i < npages; i++) {
2543                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2544
2545                 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2546                         continue;
2547
2548                 if (page_mapped(page)) {
2549                         try_to_unmap(page, flags);
2550                         if (page_mapped(page))
2551                                 goto restore;
2552                 }
2553
2554                 if (migrate_vma_check_page(page))
2555                         continue;
2556
2557 restore:
2558                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2559                 migrate->cpages--;
2560                 restore++;
2561         }
2562
2563         for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2564                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2565
2566                 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2567                         continue;
2568
2569                 remove_migration_ptes(page, page, false);
2570
2571                 migrate->src[i] = 0;
2572                 unlock_page(page);
2573                 restore--;
2574
2575                 if (is_zone_device_page(page))
2576                         put_page(page);
2577                 else
2578                         putback_lru_page(page);
2579         }
2580 }
2581
2582 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2583                                     unsigned long addr,
2584                                     struct page *page,
2585                                     unsigned long *src,
2586                                     unsigned long *dst)
2587 {
2588         struct vm_area_struct *vma = migrate->vma;
2589         struct mm_struct *mm = vma->vm_mm;
2590         struct mem_cgroup *memcg;
2591         bool flush = false;
2592         spinlock_t *ptl;
2593         pte_t entry;
2594         pgd_t *pgdp;
2595         p4d_t *p4dp;
2596         pud_t *pudp;
2597         pmd_t *pmdp;
2598         pte_t *ptep;
2599
2600         /* Only allow populating anonymous memory */
2601         if (!vma_is_anonymous(vma))
2602                 goto abort;
2603
2604         pgdp = pgd_offset(mm, addr);
2605         p4dp = p4d_alloc(mm, pgdp, addr);
2606         if (!p4dp)
2607                 goto abort;
2608         pudp = pud_alloc(mm, p4dp, addr);
2609         if (!pudp)
2610                 goto abort;
2611         pmdp = pmd_alloc(mm, pudp, addr);
2612         if (!pmdp)
2613                 goto abort;
2614
2615         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2616                 goto abort;
2617
2618         /*
2619          * Use pte_alloc() instead of pte_alloc_map().  We can't run
2620          * pte_offset_map() on pmds where a huge pmd might be created
2621          * from a different thread.
2622          *
2623          * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
2624          * parallel threads are excluded by other means.
2625          *
2626          * Here we only have down_read(mmap_sem).
2627          */
2628         if (pte_alloc(mm, pmdp))
2629                 goto abort;
2630
2631         /* See the comment in pte_alloc_one_map() */
2632         if (unlikely(pmd_trans_unstable(pmdp)))
2633                 goto abort;
2634
2635         if (unlikely(anon_vma_prepare(vma)))
2636                 goto abort;
2637         if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
2638                 goto abort;
2639
2640         /*
2641          * The memory barrier inside __SetPageUptodate makes sure that
2642          * preceding stores to the page contents become visible before
2643          * the set_pte_at() write.
2644          */
2645         __SetPageUptodate(page);
2646
2647         if (is_zone_device_page(page)) {
2648                 if (is_device_private_page(page)) {
2649                         swp_entry_t swp_entry;
2650
2651                         swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
2652                         entry = swp_entry_to_pte(swp_entry);
2653                 } else if (is_device_public_page(page)) {
2654                         entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
2655                         if (vma->vm_flags & VM_WRITE)
2656                                 entry = pte_mkwrite(pte_mkdirty(entry));
2657                         entry = pte_mkdevmap(entry);
2658                 }
2659         } else {
2660                 entry = mk_pte(page, vma->vm_page_prot);
2661                 if (vma->vm_flags & VM_WRITE)
2662                         entry = pte_mkwrite(pte_mkdirty(entry));
2663         }
2664
2665         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2666
2667         if (pte_present(*ptep)) {
2668                 unsigned long pfn = pte_pfn(*ptep);
2669
2670                 if (!is_zero_pfn(pfn)) {
2671                         pte_unmap_unlock(ptep, ptl);
2672                         mem_cgroup_cancel_charge(page, memcg, false);
2673                         goto abort;
2674                 }
2675                 flush = true;
2676         } else if (!pte_none(*ptep)) {
2677                 pte_unmap_unlock(ptep, ptl);
2678                 mem_cgroup_cancel_charge(page, memcg, false);
2679                 goto abort;
2680         }
2681
2682         /*
2683          * Check for userfaultfd but do not deliver the fault. Instead,
2684          * just back off.
2685          */
2686         if (userfaultfd_missing(vma)) {
2687                 pte_unmap_unlock(ptep, ptl);
2688                 mem_cgroup_cancel_charge(page, memcg, false);
2689                 goto abort;
2690         }
2691
2692         inc_mm_counter(mm, MM_ANONPAGES);
2693         page_add_new_anon_rmap(page, vma, addr, false);
2694         mem_cgroup_commit_charge(page, memcg, false, false);
2695         if (!is_zone_device_page(page))
2696                 lru_cache_add_active_or_unevictable(page, vma);
2697         get_page(page);
2698
2699         if (flush) {
2700                 flush_cache_page(vma, addr, pte_pfn(*ptep));
2701                 ptep_clear_flush_notify(vma, addr, ptep);
2702                 set_pte_at_notify(mm, addr, ptep, entry);
2703                 update_mmu_cache(vma, addr, ptep);
2704         } else {
2705                 /* No need to invalidate - it was non-present before */
2706                 set_pte_at(mm, addr, ptep, entry);
2707                 update_mmu_cache(vma, addr, ptep);
2708         }
2709
2710         pte_unmap_unlock(ptep, ptl);
2711         *src = MIGRATE_PFN_MIGRATE;
2712         return;
2713
2714 abort:
2715         *src &= ~MIGRATE_PFN_MIGRATE;
2716 }
2717
2718 /*
2719  * migrate_vma_pages() - migrate meta-data from src page to dst page
2720  * @migrate: migrate struct containing all migration information
2721  *
2722  * This migrates struct page meta-data from source struct page to destination
2723  * struct page. This effectively finishes the migration from source page to the
2724  * destination page.
2725  */
2726 static void migrate_vma_pages(struct migrate_vma *migrate)
2727 {
2728         const unsigned long npages = migrate->npages;
2729         const unsigned long start = migrate->start;
2730         struct mmu_notifier_range range;
2731         unsigned long addr, i;
2732         bool notified = false;
2733
2734         for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2735                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2736                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2737                 struct address_space *mapping;
2738                 int r;
2739
2740                 if (!newpage) {
2741                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2742                         continue;
2743                 }
2744
2745                 if (!page) {
2746                         if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
2747                                 continue;
2748                         }
2749                         if (!notified) {
2750                                 notified = true;
2751
2752                                 mmu_notifier_range_init(&range,
2753                                                         migrate->vma->vm_mm,
2754                                                         addr, migrate->end);
2755                                 mmu_notifier_invalidate_range_start(&range);
2756                         }
2757                         migrate_vma_insert_page(migrate, addr, newpage,
2758                                                 &migrate->src[i],
2759                                                 &migrate->dst[i]);
2760                         continue;
2761                 }
2762
2763                 mapping = page_mapping(page);
2764
2765                 if (is_zone_device_page(newpage)) {
2766                         if (is_device_private_page(newpage)) {
2767                                 /*
2768                                  * For now only support private anonymous memory
2769                                  * when migrating to un-addressable device memory.
2770                                  */
2771                                 if (mapping) {
2772                                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2773                                         continue;
2774                                 }
2775                         } else if (!is_device_public_page(newpage)) {
2776                                 /*
2777                                  * Other types of ZONE_DEVICE page are not
2778                                  * supported.
2779                                  */
2780                                 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2781                                 continue;
2782                         }
2783                 }
2784
2785                 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
2786                 if (r != MIGRATEPAGE_SUCCESS)
2787                         migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2788         }
2789
2790         /*
2791          * No need to double call mmu_notifier->invalidate_range() callback as
2792          * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
2793          * did already call it.
2794          */
2795         if (notified)
2796                 mmu_notifier_invalidate_range_only_end(&range);
2797 }
2798
2799 /*
2800  * migrate_vma_finalize() - restore CPU page table entry
2801  * @migrate: migrate struct containing all migration information
2802  *
2803  * This replaces the special migration pte entry with either a mapping to the
2804  * new page if migration was successful for that page, or to the original page
2805  * otherwise.
2806  *
2807  * This also unlocks the pages and puts them back on the lru or, for device
2808  * pages, drops the extra refcount.
2809  */
2810 static void migrate_vma_finalize(struct migrate_vma *migrate)
2811 {
2812         const unsigned long npages = migrate->npages;
2813         unsigned long i;
2814
2815         for (i = 0; i < npages; i++) {
2816                 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2817                 struct page *page = migrate_pfn_to_page(migrate->src[i]);
2818
2819                 if (!page) {
2820                         if (newpage) {
2821                                 unlock_page(newpage);
2822                                 put_page(newpage);
2823                         }
2824                         continue;
2825                 }
2826
2827                 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
2828                         if (newpage) {
2829                                 unlock_page(newpage);
2830                                 put_page(newpage);
2831                         }
2832                         newpage = page;
2833                 }
2834
2835                 remove_migration_ptes(page, newpage, false);
2836                 unlock_page(page);
2837                 migrate->cpages--;
2838
2839                 if (is_zone_device_page(page))
2840                         put_page(page);
2841                 else
2842                         putback_lru_page(page);
2843
2844                 if (newpage != page) {
2845                         unlock_page(newpage);
2846                         if (is_zone_device_page(newpage))
2847                                 put_page(newpage);
2848                         else
2849                                 putback_lru_page(newpage);
2850                 }
2851         }
2852 }
2853
2854 /*
2855  * migrate_vma() - migrate a range of memory inside vma
2856  *
2857  * @ops: migration callback for allocating destination memory and copying
2858  * @vma: virtual memory area containing the range to be migrated
2859  * @start: start address of the range to migrate (inclusive)
2860  * @end: end address of the range to migrate (exclusive)
2861  * @src: array of unsigned long holding source pfns, encoded with migrate_pfn()
2862  * @dst: array of unsigned long holding destination pfns
2863  * @private: pointer passed back to each of the callbacks
2864  * Returns: 0 on success, error code otherwise
2865  *
2866  * This function tries to migrate a range of virtual addresses, using callbacks
2867  * to allocate and copy memory from source to destination. First it collects all
2868  * the pages backing each virtual address in the range, saving them in the src
2869  * array. Then it locks those pages and unmaps them. Once the pages are locked
2870  * and unmapped, it checks whether each page is pinned or not. Pages that aren't
2871  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2872  * corresponding src array entry. Pages that are pinned are restored by
2873  * remapping and unlocking them.
2874  *
2875  * At this point it calls the alloc_and_copy() callback. For documentation on
2876  * what is expected from that callback, see struct migrate_vma_ops comments in
2877  * include/linux/migrate.h
2878  *
2879  * After the alloc_and_copy() callback, this function goes over each entry in
2880  * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2881  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
2882  * then the function tries to migrate struct page information from the source
2883  * struct page to the destination struct page. If it fails to migrate the struct
2884  * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2885  * array.
2886  *
2887  * At this point all successfully migrated pages have an entry in the src
2888  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2889  * array entry with MIGRATE_PFN_VALID flag set.
2890  *
2891  * It then calls the finalize_and_map() callback. See the comments for "struct
2892  * migrate_vma_ops" in include/linux/migrate.h for details about
2893  * finalize_and_map() behavior.
2894  *
2895  * After the finalize_and_map() callback, for successfully migrated pages, this
2896  * function updates the CPU page table to point to new pages, otherwise it
2897  * restores the CPU page table to point to the original source pages.
2898  *
2899  * The function returns 0 after the above steps, even if no pages were migrated
2900  * (it only returns an error if any of the arguments are invalid).
2901  *
2902  * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
2903  * unsigned long entries.
2904  */
2905 int migrate_vma(const struct migrate_vma_ops *ops,
2906                 struct vm_area_struct *vma,
2907                 unsigned long start,
2908                 unsigned long end,
2909                 unsigned long *src,
2910                 unsigned long *dst,
2911                 void *private)
2912 {
2913         struct migrate_vma migrate;
2914
2915         /* Sanity check the arguments */
2916         start &= PAGE_MASK;
2917         end &= PAGE_MASK;
2918         if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
2919                         vma_is_dax(vma))
2920                 return -EINVAL;
2921         if (start < vma->vm_start || start >= vma->vm_end)
2922                 return -EINVAL;
2923         if (end <= vma->vm_start || end > vma->vm_end)
2924                 return -EINVAL;
2925         if (!ops || !src || !dst || start >= end)
2926                 return -EINVAL;
2927
2928         memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
2929         migrate.src = src;
2930         migrate.dst = dst;
2931         migrate.start = start;
2932         migrate.npages = 0;
2933         migrate.cpages = 0;
2934         migrate.end = end;
2935         migrate.vma = vma;
2936
2937         /* Collect, and try to unmap source pages */
2938         migrate_vma_collect(&migrate);
2939         if (!migrate.cpages)
2940                 return 0;
2941
2942         /* Lock and isolate pages */
2943         migrate_vma_prepare(&migrate);
2944         if (!migrate.cpages)
2945                 return 0;
2946
2947         /* Unmap pages */
2948         migrate_vma_unmap(&migrate);
2949         if (!migrate.cpages)
2950                 return 0;
2951
2952         /*
2953          * At this point pages are locked and unmapped, and thus they have
2954          * stable content and can safely be copied to destination memory that
2955          * is allocated by the callback.
2956          *
2957          * Note that migration can fail in migrate_vma_pages() for each
2958          * individual page.
2959          */
2960         ops->alloc_and_copy(vma, src, dst, start, end, private);
2961
2962         /* This does the real migration of struct page */
2963         migrate_vma_pages(&migrate);
2964
2965         ops->finalize_and_map(vma, src, dst, start, end, private);
2966
2967         /* Unlock and remap pages */
2968         migrate_vma_finalize(&migrate);
2969
2970         return 0;
2971 }
2972 EXPORT_SYMBOL(migrate_vma);
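/*
 * A minimal usage sketch (an addition for illustration, not part of the
 * original source, hence guarded by #if 0). It shows how a hypothetical
 * device driver might drive migrate_vma(): my_alloc_and_copy(),
 * my_finalize_and_map(), my_dev_alloc_page(), my_dev_copy() and
 * my_migrate_one_page() are all made-up names; only migrate_vma(),
 * struct migrate_vma_ops and the MIGRATE_PFN_* encoding come from this
 * file and include/linux/migrate.h.
 */
#if 0
static void my_alloc_and_copy(struct vm_area_struct *vma,
			      const unsigned long *src, unsigned long *dst,
			      unsigned long start, unsigned long end,
			      void *private)
{
	unsigned long addr, i;

	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Skip entries the core already gave up on */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Skip holes (no backing page) for simplicity */
		if (!spage)
			continue;

		dpage = my_dev_alloc_page(private);	/* hypothetical helper */
		if (!dpage)
			continue;	/* dst[i] stays 0, page is not migrated */

		/* The destination page must be locked before it is handed back */
		lock_page(dpage);
		my_dev_copy(dpage, spage);		/* hypothetical helper */

		/* Encode the destination pfn the same way as the src array */
		dst[i] = migrate_pfn(page_to_pfn(dpage)) |
			 MIGRATE_PFN_VALID | MIGRATE_PFN_LOCKED;
	}
}

static void my_finalize_and_map(struct vm_area_struct *vma,
				const unsigned long *src,
				const unsigned long *dst,
				unsigned long start, unsigned long end,
				void *private)
{
	/*
	 * Entries that still have MIGRATE_PFN_MIGRATE set did migrate;
	 * update any driver-side tracking for them here.
	 */
}

static const struct migrate_vma_ops my_migrate_ops = {
	.alloc_and_copy		= my_alloc_and_copy,
	.finalize_and_map	= my_finalize_and_map,
};

static int my_migrate_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long src = 0, dst = 0;

	/* Arrays must hold (end - start) >> PAGE_SHIFT entries; one here. */
	return migrate_vma(&my_migrate_ops, vma, addr, addr + PAGE_SIZE,
			   &src, &dst, NULL);
}
#endif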
2973 #endif /* defined(MIGRATE_VMA_HELPER) */