1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/mm/compaction.c
4  *
5  * Memory compaction for the reduction of external fragmentation. Note that
6  * this heavily depends upon page migration to do all the real heavy
7  * lifting
8  *
9  * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
10  */
11 #include <linux/cpu.h>
12 #include <linux/swap.h>
13 #include <linux/migrate.h>
14 #include <linux/compaction.h>
15 #include <linux/mm_inline.h>
16 #include <linux/sched/signal.h>
17 #include <linux/backing-dev.h>
18 #include <linux/sysctl.h>
19 #include <linux/sysfs.h>
20 #include <linux/page-isolation.h>
21 #include <linux/kasan.h>
22 #include <linux/kthread.h>
23 #include <linux/freezer.h>
24 #include <linux/page_owner.h>
25 #include "internal.h"
26
27 #ifdef CONFIG_COMPACTION
28 static inline void count_compact_event(enum vm_event_item item)
29 {
30         count_vm_event(item);
31 }
32
33 static inline void count_compact_events(enum vm_event_item item, long delta)
34 {
35         count_vm_events(item, delta);
36 }
37 #else
38 #define count_compact_event(item) do { } while (0)
39 #define count_compact_events(item, delta) do { } while (0)
40 #endif
41
42 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/compaction.h>
46
47 #define block_start_pfn(pfn, order)     round_down(pfn, 1UL << (order))
48 #define block_end_pfn(pfn, order)       ALIGN((pfn) + 1, 1UL << (order))
49 #define pageblock_start_pfn(pfn)        block_start_pfn(pfn, pageblock_order)
50 #define pageblock_end_pfn(pfn)          block_end_pfn(pfn, pageblock_order)
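
/*
 * Worked example of the helpers above (illustrative; assumes the common
 * pageblock_order == 9, i.e. 512-page pageblocks):
 *
 *	pageblock_start_pfn(0x12345) == 0x12200
 *	pageblock_end_pfn(0x12345)   == 0x12400
 *
 * so any pfn maps onto the [start, end) bounds of its containing pageblock.
 */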
51
52 static unsigned long release_freepages(struct list_head *freelist)
53 {
54         struct page *page, *next;
55         unsigned long high_pfn = 0;
56
57         list_for_each_entry_safe(page, next, freelist, lru) {
58                 unsigned long pfn = page_to_pfn(page);
59                 list_del(&page->lru);
60                 __free_page(page);
61                 if (pfn > high_pfn)
62                         high_pfn = pfn;
63         }
64
65         return high_pfn;
66 }
67
68 static void map_pages(struct list_head *list)
69 {
70         unsigned int i, order, nr_pages;
71         struct page *page, *next;
72         LIST_HEAD(tmp_list);
73
74         list_for_each_entry_safe(page, next, list, lru) {
75                 list_del(&page->lru);
76
77                 order = page_private(page);
78                 nr_pages = 1 << order;
79
80                 post_alloc_hook(page, order, __GFP_MOVABLE);
81                 if (order)
82                         split_page(page, order);
83
84                 for (i = 0; i < nr_pages; i++) {
85                         list_add(&page->lru, &tmp_list);
86                         page++;
87                 }
88         }
89
90         list_splice(&tmp_list, list);
91 }
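
/*
 * Illustrative example for map_pages(): a single order-2 page isolated by
 * __isolate_free_page() arrives here with page_private() == 2;
 * post_alloc_hook() prepares it and split_page() turns it into four
 * independent order-0 pages, which replace the original entry on the
 * caller's list.
 */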
92
93 #ifdef CONFIG_COMPACTION
94
95 int PageMovable(struct page *page)
96 {
97         struct address_space *mapping;
98
99         VM_BUG_ON_PAGE(!PageLocked(page), page);
100         if (!__PageMovable(page))
101                 return 0;
102
103         mapping = page_mapping(page);
104         if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
105                 return 1;
106
107         return 0;
108 }
109 EXPORT_SYMBOL(PageMovable);
110
111 void __SetPageMovable(struct page *page, struct address_space *mapping)
112 {
113         VM_BUG_ON_PAGE(!PageLocked(page), page);
114         VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
115         page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
116 }
117 EXPORT_SYMBOL(__SetPageMovable);
118
119 void __ClearPageMovable(struct page *page)
120 {
121         VM_BUG_ON_PAGE(!PageLocked(page), page);
122         VM_BUG_ON_PAGE(!PageMovable(page), page);
123         /*
124           * Clear the registered address_space value while keeping the
125           * PAGE_MAPPING_MOVABLE flag, so that the VM can catch a page released
126           * by the driver after isolation; migration then won't try to put it back.
127          */
128         page->mapping = (void *)((unsigned long)page->mapping &
129                                 PAGE_MAPPING_MOVABLE);
130 }
131 EXPORT_SYMBOL(__ClearPageMovable);
132
133 /* Do not skip compaction more than 64 times */
134 #define COMPACT_MAX_DEFER_SHIFT 6
135
136 /*
137  * Compaction is deferred when it fails to result in a successful page
138  * allocation. 1 << compact_defer_shift compactions are then skipped, up
139  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
140  */
141 void defer_compaction(struct zone *zone, int order)
142 {
143         zone->compact_considered = 0;
144         zone->compact_defer_shift++;
145
146         if (order < zone->compact_order_failed)
147                 zone->compact_order_failed = order;
148
149         if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
150                 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
151
152         trace_mm_compaction_defer_compaction(zone, order);
153 }
154
155 /* Returns true if compaction should be skipped this time */
156 bool compaction_deferred(struct zone *zone, int order)
157 {
158         unsigned long defer_limit = 1UL << zone->compact_defer_shift;
159
160         if (order < zone->compact_order_failed)
161                 return false;
162
163         /* Avoid possible overflow */
164         if (++zone->compact_considered > defer_limit)
165                 zone->compact_considered = defer_limit;
166
167         if (zone->compact_considered >= defer_limit)
168                 return false;
169
170         trace_mm_compaction_deferred(zone, order);
171
172         return true;
173 }
174
175 /*
176  * Update defer tracking counters after successful compaction of given order,
177  * which means an allocation either succeeded (alloc_success == true) or is
178  * expected to succeed.
179  */
180 void compaction_defer_reset(struct zone *zone, int order,
181                 bool alloc_success)
182 {
183         if (alloc_success) {
184                 zone->compact_considered = 0;
185                 zone->compact_defer_shift = 0;
186         }
187         if (order >= zone->compact_order_failed)
188                 zone->compact_order_failed = order + 1;
189
190         trace_mm_compaction_defer_reset(zone, order);
191 }
192
193 /* Returns true if restarting compaction after many failures */
194 bool compaction_restarting(struct zone *zone, int order)
195 {
196         if (order < zone->compact_order_failed)
197                 return false;
198
199         return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
200                 zone->compact_considered >= 1UL << zone->compact_defer_shift;
201 }
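
/*
 * Illustrative sequence for the deferral logic above: every failed attempt
 * calls defer_compaction(), bumping compact_defer_shift, so after e.g. three
 * consecutive failures roughly 1 << 3 == 8 attempts are reported as deferred
 * by compaction_deferred() before one is let through again. The backoff
 * saturates at 1 << COMPACT_MAX_DEFER_SHIFT == 64 skipped attempts, at which
 * point compaction_restarting() returns true.
 */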
202
203 /* Returns true if the pageblock should be scanned for pages to isolate. */
204 static inline bool isolation_suitable(struct compact_control *cc,
205                                         struct page *page)
206 {
207         if (cc->ignore_skip_hint)
208                 return true;
209
210         return !get_pageblock_skip(page);
211 }
212
213 static void reset_cached_positions(struct zone *zone)
214 {
215         zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
216         zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
217         zone->compact_cached_free_pfn =
218                                 pageblock_start_pfn(zone_end_pfn(zone) - 1);
219 }
220
221 /*
222  * Compound pages of >= pageblock_order should consistently be skipped until
223  * released. It is always pointless to compact pages of such order (if they are
224  * migratable), and the pageblocks they occupy cannot contain any free pages.
225  */
226 static bool pageblock_skip_persistent(struct page *page)
227 {
228         if (!PageCompound(page))
229                 return false;
230
231         page = compound_head(page);
232
233         if (compound_order(page) >= pageblock_order)
234                 return true;
235
236         return false;
237 }
238
239 /*
240  * This function is called to clear all cached information on pageblocks that
241  * should be skipped for page isolation when the migrate and free page scanner
242  * meet.
243  */
244 static void __reset_isolation_suitable(struct zone *zone)
245 {
246         unsigned long start_pfn = zone->zone_start_pfn;
247         unsigned long end_pfn = zone_end_pfn(zone);
248         unsigned long pfn;
249
250         zone->compact_blockskip_flush = false;
251
252         /* Walk the zone and mark every pageblock as suitable for isolation */
253         for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
254                 struct page *page;
255
256                 cond_resched();
257
258                 page = pfn_to_online_page(pfn);
259                 if (!page)
260                         continue;
261                 if (zone != page_zone(page))
262                         continue;
263                 if (pageblock_skip_persistent(page))
264                         continue;
265
266                 clear_pageblock_skip(page);
267         }
268
269         reset_cached_positions(zone);
270 }
271
272 void reset_isolation_suitable(pg_data_t *pgdat)
273 {
274         int zoneid;
275
276         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
277                 struct zone *zone = &pgdat->node_zones[zoneid];
278                 if (!populated_zone(zone))
279                         continue;
280
281                 /* Only flush if a full compaction finished recently */
282                 if (zone->compact_blockskip_flush)
283                         __reset_isolation_suitable(zone);
284         }
285 }
286
287 /*
288  * If no pages were isolated then mark this pageblock to be skipped in the
289  * future. The information is later cleared by __reset_isolation_suitable().
290  */
291 static void update_pageblock_skip(struct compact_control *cc,
292                         struct page *page, unsigned long nr_isolated,
293                         bool migrate_scanner)
294 {
295         struct zone *zone = cc->zone;
296         unsigned long pfn;
297
298         if (cc->no_set_skip_hint)
299                 return;
300
301         if (!page)
302                 return;
303
304         if (nr_isolated)
305                 return;
306
307         set_pageblock_skip(page);
308
309         pfn = page_to_pfn(page);
310
311         /* Update where async and sync compaction should restart */
312         if (migrate_scanner) {
313                 if (pfn > zone->compact_cached_migrate_pfn[0])
314                         zone->compact_cached_migrate_pfn[0] = pfn;
315                 if (cc->mode != MIGRATE_ASYNC &&
316                     pfn > zone->compact_cached_migrate_pfn[1])
317                         zone->compact_cached_migrate_pfn[1] = pfn;
318         } else {
319                 if (pfn < zone->compact_cached_free_pfn)
320                         zone->compact_cached_free_pfn = pfn;
321         }
322 }
323 #else
324 static inline bool isolation_suitable(struct compact_control *cc,
325                                         struct page *page)
326 {
327         return true;
328 }
329
330 static inline bool pageblock_skip_persistent(struct page *page)
331 {
332         return false;
333 }
334
335 static inline void update_pageblock_skip(struct compact_control *cc,
336                         struct page *page, unsigned long nr_isolated,
337                         bool migrate_scanner)
338 {
339 }
340 #endif /* CONFIG_COMPACTION */
341
342 /*
343  * Compaction requires the taking of some coarse locks that are potentially
344  * very heavily contended. For async compaction, back out if the lock cannot
345  * be taken immediately. For sync compaction, spin on the lock if needed.
346  *
347  * Returns true if the lock is held
348  * Returns false if the lock is not held and compaction should abort
349  */
350 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
351                                                 struct compact_control *cc)
352 {
353         if (cc->mode == MIGRATE_ASYNC) {
354                 if (!spin_trylock_irqsave(lock, *flags)) {
355                         cc->contended = true;
356                         return false;
357                 }
358         } else {
359                 spin_lock_irqsave(lock, *flags);
360         }
361
362         return true;
363 }
364
365 /*
366  * Compaction requires the taking of some coarse locks that are potentially
367  * very heavily contended. The lock should be periodically unlocked to avoid
368  * having disabled IRQs for a long time, even when there is nobody waiting on
369  * the lock. It might also be that allowing the IRQs will result in
370  * need_resched() becoming true. If scheduling is needed, async compaction
371  * aborts. Sync compaction schedules.
372  * Either compaction type will also abort if a fatal signal is pending.
373  * In either case if the lock was locked, it is dropped and not regained.
374  *
375  * Returns true if compaction should abort due to fatal signal pending, or
376  *              async compaction due to need_resched()
377  * Returns false when compaction can continue (sync compaction might have
378  *              scheduled)
379  */
380 static bool compact_unlock_should_abort(spinlock_t *lock,
381                 unsigned long flags, bool *locked, struct compact_control *cc)
382 {
383         if (*locked) {
384                 spin_unlock_irqrestore(lock, flags);
385                 *locked = false;
386         }
387
388         if (fatal_signal_pending(current)) {
389                 cc->contended = true;
390                 return true;
391         }
392
393         if (need_resched()) {
394                 if (cc->mode == MIGRATE_ASYNC) {
395                         cc->contended = true;
396                         return true;
397                 }
398                 cond_resched();
399         }
400
401         return false;
402 }
403
404 /*
405  * Aside from avoiding lock contention, compaction also periodically checks
406  * need_resched() and either schedules in sync compaction or aborts async
407  * compaction. This is similar to what compact_unlock_should_abort() does, but
408  * is used where no lock is concerned.
409  *
410  * Returns false when no scheduling was needed, or sync compaction scheduled.
411  * Returns true when async compaction should abort.
412  */
413 static inline bool compact_should_abort(struct compact_control *cc)
414 {
415         /* async compaction aborts if contended */
416         if (need_resched()) {
417                 if (cc->mode == MIGRATE_ASYNC) {
418                         cc->contended = true;
419                         return true;
420                 }
421
422                 cond_resched();
423         }
424
425         return false;
426 }
427
428 /*
429  * Isolate free pages onto a private freelist. If @strict is true, will abort
430  * and return 0 on any invalid PFNs or non-free pages inside the pageblock
431  * (even though it may still end up isolating some pages).
432  */
433 static unsigned long isolate_freepages_block(struct compact_control *cc,
434                                 unsigned long *start_pfn,
435                                 unsigned long end_pfn,
436                                 struct list_head *freelist,
437                                 bool strict)
438 {
439         int nr_scanned = 0, total_isolated = 0;
440         struct page *cursor, *valid_page = NULL;
441         unsigned long flags = 0;
442         bool locked = false;
443         unsigned long blockpfn = *start_pfn;
444         unsigned int order;
445
446         cursor = pfn_to_page(blockpfn);
447
448         /* Isolate free pages. */
449         for (; blockpfn < end_pfn; blockpfn++, cursor++) {
450                 int isolated;
451                 struct page *page = cursor;
452
453                 /*
454                  * Periodically drop the lock (if held) regardless of its
455                  * contention, to give IRQs a chance. Abort if a fatal signal
456                  * is pending or async compaction detects need_resched().
457                  */
458                 if (!(blockpfn % SWAP_CLUSTER_MAX)
459                     && compact_unlock_should_abort(&cc->zone->lock, flags,
460                                                                 &locked, cc))
461                         break;
462
463                 nr_scanned++;
464                 if (!pfn_valid_within(blockpfn))
465                         goto isolate_fail;
466
467                 if (!valid_page)
468                         valid_page = page;
469
470                 /*
471                  * For compound pages such as THP and hugetlbfs, we can save
472                  * potentially a lot of iterations if we skip them at once.
473                  * The check is racy, but we can consider only valid values
474                  * and the only danger is skipping too much.
475                  */
476                 if (PageCompound(page)) {
477                         const unsigned int order = compound_order(page);
478
479                         if (likely(order < MAX_ORDER)) {
480                                 blockpfn += (1UL << order) - 1;
481                                 cursor += (1UL << order) - 1;
482                         }
483                         goto isolate_fail;
484                 }
485
486                 if (!PageBuddy(page))
487                         goto isolate_fail;
488
489                 /*
490                  * If we already hold the lock, we can skip some rechecking.
491                  * Note that if we hold the lock now, checked_pageblock was
492                  * already set in some previous iteration (or strict is true),
493                  * so it is correct to skip the suitable migration target
494                  * recheck as well.
495                  */
496                 if (!locked) {
497                         /*
498                          * The zone lock must be held to isolate freepages.
499                          * Unfortunately this is a very coarse lock and can be
500                          * heavily contended if there are parallel allocations
501                          * or parallel compactions. For async compaction do not
502                          * spin on the lock and we acquire the lock as late as
503                          * possible.
504                          */
505                         locked = compact_trylock_irqsave(&cc->zone->lock,
506                                                                 &flags, cc);
507                         if (!locked)
508                                 break;
509
510                         /* Recheck this is a buddy page under lock */
511                         if (!PageBuddy(page))
512                                 goto isolate_fail;
513                 }
514
515                 /* Found a free page, will break it into order-0 pages */
516                 order = page_order(page);
517                 isolated = __isolate_free_page(page, order);
518                 if (!isolated)
519                         break;
520                 set_page_private(page, order);
521
522                 total_isolated += isolated;
523                 cc->nr_freepages += isolated;
524                 list_add_tail(&page->lru, freelist);
525
526                 if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
527                         blockpfn += isolated;
528                         break;
529                 }
530                 /* Advance to the end of split page */
531                 blockpfn += isolated - 1;
532                 cursor += isolated - 1;
533                 continue;
534
535 isolate_fail:
536                 if (strict)
537                         break;
538                 else
539                         continue;
540
541         }
542
543         if (locked)
544                 spin_unlock_irqrestore(&cc->zone->lock, flags);
545
546         /*
547          * There is a tiny chance that we have read bogus compound_order(),
548          * so be careful to not go outside of the pageblock.
549          * so be careful not to go outside the pageblock.
550         if (unlikely(blockpfn > end_pfn))
551                 blockpfn = end_pfn;
552
553         trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
554                                         nr_scanned, total_isolated);
555
556         /* Record how far we have got within the block */
557         *start_pfn = blockpfn;
558
559         /*
560          * If strict isolation is requested by CMA then check that all the
561          * pages requested were isolated. If there were any failures, 0 is
562          * returned and CMA will fail.
563          */
564         if (strict && blockpfn < end_pfn)
565                 total_isolated = 0;
566
567         /* Update the pageblock-skip if the whole pageblock was scanned */
568         if (blockpfn == end_pfn)
569                 update_pageblock_skip(cc, valid_page, total_isolated, false);
570
571         cc->total_free_scanned += nr_scanned;
572         if (total_isolated)
573                 count_compact_events(COMPACTISOLATED, total_isolated);
574         return total_isolated;
575 }
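
/*
 * Within this file, isolate_freepages_block() has two callers: the free page
 * scanner isolate_freepages() (strict == false) and isolate_freepages_range()
 * used by CMA (strict == true), which cannot tolerate any holes in the range.
 */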
576
577 /**
578  * isolate_freepages_range() - isolate free pages.
579  * @cc:        Compaction control structure.
580  * @start_pfn: The first PFN to start isolating.
581  * @end_pfn:   The one-past-last PFN.
582  *
583  * Non-free pages, invalid PFNs, or zone boundaries within the
584  * [start_pfn, end_pfn) range are considered errors, cause function to
585  * undo its actions and return zero.
586  *
587  * Otherwise, function returns one-past-the-last PFN of isolated page
588  * (which may be greater than end_pfn if end fell in the middle of
589  * a free page).
590  */
591 unsigned long
592 isolate_freepages_range(struct compact_control *cc,
593                         unsigned long start_pfn, unsigned long end_pfn)
594 {
595         unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
596         LIST_HEAD(freelist);
597
598         pfn = start_pfn;
599         block_start_pfn = pageblock_start_pfn(pfn);
600         if (block_start_pfn < cc->zone->zone_start_pfn)
601                 block_start_pfn = cc->zone->zone_start_pfn;
602         block_end_pfn = pageblock_end_pfn(pfn);
603
604         for (; pfn < end_pfn; pfn += isolated,
605                                 block_start_pfn = block_end_pfn,
606                                 block_end_pfn += pageblock_nr_pages) {
607                 /* Protect pfn from changing by isolate_freepages_block */
608                 unsigned long isolate_start_pfn = pfn;
609
610                 block_end_pfn = min(block_end_pfn, end_pfn);
611
612                 /*
613                  * pfn could pass block_end_pfn if the isolated free page
614                  * is larger than a pageblock. In this case, adjust the
615                  * scanning range to the correct block.
616                  */
617                 if (pfn >= block_end_pfn) {
618                         block_start_pfn = pageblock_start_pfn(pfn);
619                         block_end_pfn = pageblock_end_pfn(pfn);
620                         block_end_pfn = min(block_end_pfn, end_pfn);
621                 }
622
623                 if (!pageblock_pfn_to_page(block_start_pfn,
624                                         block_end_pfn, cc->zone))
625                         break;
626
627                 isolated = isolate_freepages_block(cc, &isolate_start_pfn,
628                                                 block_end_pfn, &freelist, true);
629
630                 /*
631                  * In strict mode, isolate_freepages_block() returns 0 if
632                  * there are any holes in the block (ie. invalid PFNs or
633                  * non-free pages).
634                  */
635                 if (!isolated)
636                         break;
637
638                 /*
639                  * If we managed to isolate pages, it is always (1 << n) *
640                  * pageblock_nr_pages for some non-negative n.  (Max order
641                  * page may span two pageblocks).
642                  */
643         }
644
645         /* __isolate_free_page() does not map the pages */
646         map_pages(&freelist);
647
648         if (pfn < end_pfn) {
649                 /* Loop terminated early, cleanup. */
650                 release_freepages(&freelist);
651                 return 0;
652         }
653
654         /* We don't use freelists for anything. */
655         return pfn;
656 }
657
658 /* Similar to reclaim, but different enough that they don't share logic */
659 static bool too_many_isolated(struct zone *zone)
660 {
661         unsigned long active, inactive, isolated;
662
663         inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
664                         node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
665         active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
666                         node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
667         isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
668                         node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
669
670         return isolated > (inactive + active) / 2;
671 }
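
/*
 * Example: with 4000 active and 4000 inactive LRU pages on the node, this
 * returns true once more than 4000 pages are already isolated, and the
 * caller backs off.
 */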
672
673 /**
674  * isolate_migratepages_block() - isolate all migrate-able pages within
675  *                                a single pageblock
676  * @cc:         Compaction control structure.
677  * @low_pfn:    The first PFN to isolate
678  * @end_pfn:    The one-past-the-last PFN to isolate, within same pageblock
679  * @isolate_mode: Isolation mode to be used.
680  *
681  * Isolate all pages that can be migrated from the range specified by
682  * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
683  * Returns zero if there is a fatal signal pending, otherwise PFN of the
684  * first page that was not scanned (which may be less than, equal to, or
685  * greater than end_pfn).
686  *
687  * The pages are isolated on cc->migratepages list (not required to be empty),
688  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
689  * is neither read nor updated.
690  */
691 static unsigned long
692 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
693                         unsigned long end_pfn, isolate_mode_t isolate_mode)
694 {
695         struct zone *zone = cc->zone;
696         unsigned long nr_scanned = 0, nr_isolated = 0;
697         struct lruvec *lruvec;
698         unsigned long flags = 0;
699         bool locked = false;
700         struct page *page = NULL, *valid_page = NULL;
701         unsigned long start_pfn = low_pfn;
702         bool skip_on_failure = false;
703         unsigned long next_skip_pfn = 0;
704
705         /*
706          * Ensure that there are not too many pages isolated from the LRU
707          * list by either parallel reclaimers or compaction. If there are,
708          * delay for some time until fewer pages are isolated
709          */
710         while (unlikely(too_many_isolated(zone))) {
711                 /* async migration should just abort */
712                 if (cc->mode == MIGRATE_ASYNC)
713                         return 0;
714
715                 congestion_wait(BLK_RW_ASYNC, HZ/10);
716
717                 if (fatal_signal_pending(current))
718                         return 0;
719         }
720
721         if (compact_should_abort(cc))
722                 return 0;
723
724         if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
725                 skip_on_failure = true;
726                 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
727         }
728
729         /* Time to isolate some pages for migration */
730         for (; low_pfn < end_pfn; low_pfn++) {
731
732                 if (skip_on_failure && low_pfn >= next_skip_pfn) {
733                         /*
734                          * We have isolated all migration candidates in the
735                          * previous order-aligned block, and did not skip it due
736                          * to failure. We should migrate the pages now and
737                          * hopefully succeed compaction.
738                          */
739                         if (nr_isolated)
740                                 break;
741
742                         /*
743                          * We failed to isolate in the previous order-aligned
744                          * block. Set the new boundary to the end of the
745                          * current block. Note we can't simply increase
746                          * next_skip_pfn by 1 << order, as low_pfn might have
747                          * been incremented by a higher number due to skipping
748                          * a compound or a high-order buddy page in the
749                          * previous loop iteration.
750                          */
751                         next_skip_pfn = block_end_pfn(low_pfn, cc->order);
752                 }
753
754                 /*
755                  * Periodically drop the lock (if held) regardless of its
756                  * contention, to give IRQs a chance. Abort async compaction
757                  * if contended.
758                  */
759                 if (!(low_pfn % SWAP_CLUSTER_MAX)
760                     && compact_unlock_should_abort(zone_lru_lock(zone), flags,
761                                                                 &locked, cc))
762                         break;
763
764                 if (!pfn_valid_within(low_pfn))
765                         goto isolate_fail;
766                 nr_scanned++;
767
768                 page = pfn_to_page(low_pfn);
769
770                 if (!valid_page)
771                         valid_page = page;
772
773                 /*
774                  * Skip if free. We read page order here without zone lock
775                  * which is generally unsafe, but the race window is small and
776                  * the worst thing that can happen is that we skip some
777                  * potential isolation targets.
778                  */
779                 if (PageBuddy(page)) {
780                         unsigned long freepage_order = page_order_unsafe(page);
781
782                         /*
783                          * Without lock, we cannot be sure that what we got is
784                          * a valid page order. Consider only values in the
785                          * valid order range to prevent low_pfn overflow.
786                          */
787                         if (freepage_order > 0 && freepage_order < MAX_ORDER)
788                                 low_pfn += (1UL << freepage_order) - 1;
789                         continue;
790                 }
791
792                 /*
793                  * Regardless of being on LRU, compound pages such as THP and
794                  * hugetlbfs are not to be compacted. We can potentially save
795                  * a lot of iterations if we skip them at once. The check is
796                  * racy, but we can consider only valid values and the only
797                  * danger is skipping too much.
798                  */
799                 if (PageCompound(page)) {
800                         const unsigned int order = compound_order(page);
801
802                         if (likely(order < MAX_ORDER))
803                                 low_pfn += (1UL << order) - 1;
804                         goto isolate_fail;
805                 }
806
807                 /*
808                  * Check may be lockless but that's ok as we recheck later.
809                  * It's possible to migrate LRU and non-lru movable pages.
810                  * Skip any other type of page
811                  */
812                 if (!PageLRU(page)) {
813                         /*
814                          * __PageMovable can return false positive so we need
815                          * to verify it under page_lock.
816                          */
817                         if (unlikely(__PageMovable(page)) &&
818                                         !PageIsolated(page)) {
819                                 if (locked) {
820                                         spin_unlock_irqrestore(zone_lru_lock(zone),
821                                                                         flags);
822                                         locked = false;
823                                 }
824
825                                 if (!isolate_movable_page(page, isolate_mode))
826                                         goto isolate_success;
827                         }
828
829                         goto isolate_fail;
830                 }
831
832                 /*
833                  * Migration will fail if an anonymous page is pinned in memory,
834                  * so avoid taking lru_lock and isolating it unnecessarily in an
835                  * admittedly racy check.
836                  */
837                 if (!page_mapping(page) &&
838                     page_count(page) > page_mapcount(page))
839                         goto isolate_fail;
840
841                 /*
842                  * Only allow to migrate anonymous pages in GFP_NOFS context
843                  * because those do not depend on fs locks.
844                  */
845                 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
846                         goto isolate_fail;
847
848                 /* If we already hold the lock, we can skip some rechecking */
849                 if (!locked) {
850                         locked = compact_trylock_irqsave(zone_lru_lock(zone),
851                                                                 &flags, cc);
852                         if (!locked)
853                                 break;
854
855                         /* Recheck PageLRU and PageCompound under lock */
856                         if (!PageLRU(page))
857                                 goto isolate_fail;
858
859                         /*
860                          * Page became compound since the non-locked check,
861                          * and it's on LRU. It can only be a THP so the order
862                          * is safe to read and it's 0 for tail pages.
863                          */
864                         if (unlikely(PageCompound(page))) {
865                                 low_pfn += (1UL << compound_order(page)) - 1;
866                                 goto isolate_fail;
867                         }
868                 }
869
870                 lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
871
872                 /* Try isolate the page */
873                 if (__isolate_lru_page(page, isolate_mode) != 0)
874                         goto isolate_fail;
875
876                 VM_BUG_ON_PAGE(PageCompound(page), page);
877
878                 /* Successfully isolated */
879                 del_page_from_lru_list(page, lruvec, page_lru(page));
880                 inc_node_page_state(page,
881                                 NR_ISOLATED_ANON + page_is_file_cache(page));
882
883 isolate_success:
884                 list_add(&page->lru, &cc->migratepages);
885                 cc->nr_migratepages++;
886                 nr_isolated++;
887
888                 /*
889                  * Record where we could have freed pages by migration and have
890                  * not yet flushed them to the buddy allocator; this is the
891                  * lowest page that was isolated and will likely then be freed
892                  * by migration.
893                  */
894                 if (!cc->last_migrated_pfn)
895                         cc->last_migrated_pfn = low_pfn;
896
897                 /* Avoid isolating too much */
898                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
899                         ++low_pfn;
900                         break;
901                 }
902
903                 continue;
904 isolate_fail:
905                 if (!skip_on_failure)
906                         continue;
907
908                 /*
909                  * We have isolated some pages, but then failed. Release them
910                  * instead of migrating, as we cannot form the cc->order buddy
911                  * page anyway.
912                  */
913                 if (nr_isolated) {
914                         if (locked) {
915                                 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
916                                 locked = false;
917                         }
918                         putback_movable_pages(&cc->migratepages);
919                         cc->nr_migratepages = 0;
920                         cc->last_migrated_pfn = 0;
921                         nr_isolated = 0;
922                 }
923
924                 if (low_pfn < next_skip_pfn) {
925                         low_pfn = next_skip_pfn - 1;
926                         /*
927                          * The check near the loop beginning would have updated
928                          * next_skip_pfn too, but this is a bit simpler.
929                          */
930                         next_skip_pfn += 1UL << cc->order;
931                 }
932         }
933
934         /*
935          * The PageBuddy() check could have potentially brought us outside
936          * the range to be scanned.
937          */
938         if (unlikely(low_pfn > end_pfn))
939                 low_pfn = end_pfn;
940
941         if (locked)
942                 spin_unlock_irqrestore(zone_lru_lock(zone), flags);
943
944         /*
945          * Update the pageblock-skip information and cached scanner pfn,
946          * if the whole pageblock was scanned without isolating any page.
947          */
948         if (low_pfn == end_pfn)
949                 update_pageblock_skip(cc, valid_page, nr_isolated, true);
950
951         trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
952                                                 nr_scanned, nr_isolated);
953
954         cc->total_migrate_scanned += nr_scanned;
955         if (nr_isolated)
956                 count_compact_events(COMPACTISOLATED, nr_isolated);
957
958         return low_pfn;
959 }
960
961 /**
962  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
963  * @cc:        Compaction control structure.
964  * @start_pfn: The first PFN to start isolating.
965  * @end_pfn:   The one-past-last PFN.
966  *
967  * Returns zero if isolation fails fatally due to e.g. pending signal.
968  * Otherwise, function returns one-past-the-last PFN of isolated page
969  * (which may be greater than end_pfn if end fell in a middle of a THP page).
970  */
971 unsigned long
972 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
973                                                         unsigned long end_pfn)
974 {
975         unsigned long pfn, block_start_pfn, block_end_pfn;
976
977         /* Scan block by block. First and last block may be incomplete */
978         pfn = start_pfn;
979         block_start_pfn = pageblock_start_pfn(pfn);
980         if (block_start_pfn < cc->zone->zone_start_pfn)
981                 block_start_pfn = cc->zone->zone_start_pfn;
982         block_end_pfn = pageblock_end_pfn(pfn);
983
984         for (; pfn < end_pfn; pfn = block_end_pfn,
985                                 block_start_pfn = block_end_pfn,
986                                 block_end_pfn += pageblock_nr_pages) {
987
988                 block_end_pfn = min(block_end_pfn, end_pfn);
989
990                 if (!pageblock_pfn_to_page(block_start_pfn,
991                                         block_end_pfn, cc->zone))
992                         continue;
993
994                 pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
995                                                         ISOLATE_UNEVICTABLE);
996
997                 if (!pfn)
998                         break;
999
1000                 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
1001                         break;
1002         }
1003
1004         return pfn;
1005 }
1006
1007 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
1008 #ifdef CONFIG_COMPACTION
1009
1010 static bool suitable_migration_source(struct compact_control *cc,
1011                                                         struct page *page)
1012 {
1013         int block_mt;
1014
1015         if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
1016                 return true;
1017
1018         block_mt = get_pageblock_migratetype(page);
1019
1020         if (cc->migratetype == MIGRATE_MOVABLE)
1021                 return is_migrate_movable(block_mt);
1022         else
1023                 return block_mt == cc->migratetype;
1024 }
1025
1026 /* Returns true if the page is within a block suitable for migration to */
1027 static bool suitable_migration_target(struct compact_control *cc,
1028                                                         struct page *page)
1029 {
1030         /* If the page is a large free page, then disallow migration */
1031         if (PageBuddy(page)) {
1032                 /*
1033                  * We are checking page_order without zone->lock taken. But
1034                  * the only small danger is that we skip a potentially suitable
1035                  * pageblock, so it's not worth checking the order for a valid range.
1036                  */
1037                 if (page_order_unsafe(page) >= pageblock_order)
1038                         return false;
1039         }
1040
1041         if (cc->ignore_block_suitable)
1042                 return true;
1043
1044         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
1045         if (is_migrate_movable(get_pageblock_migratetype(page)))
1046                 return true;
1047
1048         /* Otherwise skip the block */
1049         return false;
1050 }
1051
1052 /*
1053  * Test whether the free scanner has reached the same or lower pageblock than
1054  * the migration scanner, and compaction should thus terminate.
1055  */
1056 static inline bool compact_scanners_met(struct compact_control *cc)
1057 {
1058         return (cc->free_pfn >> pageblock_order)
1059                 <= (cc->migrate_pfn >> pageblock_order);
1060 }
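
/*
 * Example (assuming pageblock_order == 9): free_pfn == 0x12200 lies in
 * pageblock 0x91 while migrate_pfn == 0x12000 lies in pageblock 0x90, so the
 * scanners have not met yet; once the free scanner reaches the same or a
 * lower pageblock than the migration scanner, the compaction run terminates.
 */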
1061
1062 /*
1063  * Based on information in the current compact_control, find blocks
1064  * suitable for isolating free pages from and then isolate them.
1065  */
1066 static void isolate_freepages(struct compact_control *cc)
1067 {
1068         struct zone *zone = cc->zone;
1069         struct page *page;
1070         unsigned long block_start_pfn;  /* start of current pageblock */
1071         unsigned long isolate_start_pfn; /* exact pfn we start at */
1072         unsigned long block_end_pfn;    /* end of current pageblock */
1073         unsigned long low_pfn;       /* lowest pfn scanner is able to scan */
1074         struct list_head *freelist = &cc->freepages;
1075
1076         /*
1077          * Initialise the free scanner. The starting point is where we last
1078          * successfully isolated from, zone-cached value, or the end of the
1079          * zone when isolating for the first time. For looping we also need
1080          * this pfn aligned down to the pageblock boundary, because we do
1081          * block_start_pfn -= pageblock_nr_pages in the for loop.
1082          * For the ending point, take care when isolating in the last
1083          * pageblock of a zone which ends in the middle of a pageblock.
1084          * The low boundary is the end of the pageblock the migration scanner
1085          * is using.
1086          */
1087         isolate_start_pfn = cc->free_pfn;
1088         block_start_pfn = pageblock_start_pfn(cc->free_pfn);
1089         block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
1090                                                 zone_end_pfn(zone));
1091         low_pfn = pageblock_end_pfn(cc->migrate_pfn);
1092
1093         /*
1094          * Isolate free pages until enough are available to migrate the
1095          * pages on cc->migratepages. We stop searching if the migrate
1096          * and free page scanners meet or enough free pages are isolated.
1097          */
1098         for (; block_start_pfn >= low_pfn;
1099                                 block_end_pfn = block_start_pfn,
1100                                 block_start_pfn -= pageblock_nr_pages,
1101                                 isolate_start_pfn = block_start_pfn) {
1102                 /*
1103                  * This can iterate a massively long zone without finding any
1104                  * suitable migration targets, so periodically check if we need
1105                  * to schedule, or even abort async compaction.
1106                  */
1107                 if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1108                                                 && compact_should_abort(cc))
1109                         break;
1110
1111                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1112                                                                         zone);
1113                 if (!page)
1114                         continue;
1115
1116                 /* Check the block is suitable for migration */
1117                 if (!suitable_migration_target(cc, page))
1118                         continue;
1119
1120                 /* If isolation recently failed, do not retry */
1121                 if (!isolation_suitable(cc, page))
1122                         continue;
1123
1124                 /* Found a block suitable for isolating free pages from. */
1125                 isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
1126                                         freelist, false);
1127
1128                 /*
1129                  * If we isolated enough freepages, or aborted due to lock
1130                  * contention, terminate.
1131                  */
1132                 if ((cc->nr_freepages >= cc->nr_migratepages)
1133                                                         || cc->contended) {
1134                         if (isolate_start_pfn >= block_end_pfn) {
1135                                 /*
1136                                  * Restart at previous pageblock if more
1137                                  * freepages can be isolated next time.
1138                                  */
1139                                 isolate_start_pfn =
1140                                         block_start_pfn - pageblock_nr_pages;
1141                         }
1142                         break;
1143                 } else if (isolate_start_pfn < block_end_pfn) {
1144                         /*
1145                          * If isolation failed early, do not continue
1146                          * needlessly.
1147                          */
1148                         break;
1149                 }
1150         }
1151
1152         /* __isolate_free_page() does not map the pages */
1153         map_pages(freelist);
1154
1155         /*
1156          * Record where the free scanner will restart next time. Either we
1157          * broke from the loop and set isolate_start_pfn based on the last
1158          * call to isolate_freepages_block(), or we met the migration scanner
1159          * and the loop terminated due to isolate_start_pfn < low_pfn
1160          */
1161         cc->free_pfn = isolate_start_pfn;
1162 }
1163
1164 /*
1165  * This is a migrate-callback that "allocates" freepages by taking pages
1166  * from the isolated freelists in the block we are migrating to.
1167  */
1168 static struct page *compaction_alloc(struct page *migratepage,
1169                                         unsigned long data)
1170 {
1171         struct compact_control *cc = (struct compact_control *)data;
1172         struct page *freepage;
1173
1174         /*
1175          * Isolate free pages if necessary, and if we are not aborting due to
1176          * contention.
1177          */
1178         if (list_empty(&cc->freepages)) {
1179                 if (!cc->contended)
1180                         isolate_freepages(cc);
1181
1182                 if (list_empty(&cc->freepages))
1183                         return NULL;
1184         }
1185
1186         freepage = list_entry(cc->freepages.next, struct page, lru);
1187         list_del(&freepage->lru);
1188         cc->nr_freepages--;
1189
1190         return freepage;
1191 }
1192
1193 /*
1194  * This is a migrate-callback that "frees" freepages back to the isolated
1195  * freelist.  All pages on the freelist are from the same zone, so there is no
1196  * special handling needed for NUMA.
1197  */
1198 static void compaction_free(struct page *page, unsigned long data)
1199 {
1200         struct compact_control *cc = (struct compact_control *)data;
1201
1202         list_add(&page->lru, &cc->freepages);
1203         cc->nr_freepages++;
1204 }
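
/*
 * Sketch of how these two callbacks are expected to be wired up (see
 * compact_zone() further down in this file for the actual call):
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			    compaction_free, (unsigned long)cc, cc->mode,
 *			    MR_COMPACTION);
 */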
1205
1206 /* possible outcome of isolate_migratepages */
1207 typedef enum {
1208         ISOLATE_ABORT,          /* Abort compaction now */
1209         ISOLATE_NONE,           /* No pages isolated, continue scanning */
1210         ISOLATE_SUCCESS,        /* Pages isolated, migrate */
1211 } isolate_migrate_t;
1212
1213 /*
1214  * Allow userspace to control policy on scanning the unevictable LRU for
1215  * compactable pages.
1216  */
1217 int sysctl_compact_unevictable_allowed __read_mostly = 1;
1218
1219 /*
1220  * Isolate all pages that can be migrated from the first suitable block,
1221  * starting at the block pointed to by the migrate scanner pfn within
1222  * compact_control.
1223  */
1224 static isolate_migrate_t isolate_migratepages(struct zone *zone,
1225                                         struct compact_control *cc)
1226 {
1227         unsigned long block_start_pfn;
1228         unsigned long block_end_pfn;
1229         unsigned long low_pfn;
1230         struct page *page;
1231         const isolate_mode_t isolate_mode =
1232                 (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
1233                 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
1234
1235         /*
1236          * Start at where we last stopped, or beginning of the zone as
1237          * initialized by compact_zone()
1238          */
1239         low_pfn = cc->migrate_pfn;
1240         block_start_pfn = pageblock_start_pfn(low_pfn);
1241         if (block_start_pfn < zone->zone_start_pfn)
1242                 block_start_pfn = zone->zone_start_pfn;
1243
1244         /* Only scan within a pageblock boundary */
1245         block_end_pfn = pageblock_end_pfn(low_pfn);
1246
1247         /*
1248          * Iterate over whole pageblocks until we find the first suitable.
1249          * Do not cross the free scanner.
1250          */
1251         for (; block_end_pfn <= cc->free_pfn;
1252                         low_pfn = block_end_pfn,
1253                         block_start_pfn = block_end_pfn,
1254                         block_end_pfn += pageblock_nr_pages) {
1255
1256                 /*
1257                  * This can potentially iterate a massively long zone with
1258                  * many pageblocks unsuitable, so periodically check if we
1259                  * need to schedule, or even abort async compaction.
1260                  */
1261                 if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
1262                                                 && compact_should_abort(cc))
1263                         break;
1264
1265                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
1266                                                                         zone);
1267                 if (!page)
1268                         continue;
1269
1270                 /* If isolation recently failed, do not retry */
1271                 if (!isolation_suitable(cc, page))
1272                         continue;
1273
1274                 /*
1275                  * For async compaction, also only scan in MOVABLE blocks.
1276                  * Async compaction is optimistic: it checks whether the minimum
1277                  * amount of work satisfies the allocation.
1278                  */
1279                 if (!suitable_migration_source(cc, page))
1280                         continue;
1281
1282                 /* Perform the isolation */
1283                 low_pfn = isolate_migratepages_block(cc, low_pfn,
1284                                                 block_end_pfn, isolate_mode);
1285
1286                 if (!low_pfn || cc->contended)
1287                         return ISOLATE_ABORT;
1288
1289                 /*
1290                  * Either we isolated something and proceed with migration. Or
1291                  * we failed and compact_zone should decide if we should
1292                  * continue or not.
1293                  */
1294                 break;
1295         }
1296
1297         /* Record where migration scanner will be restarted. */
1298         cc->migrate_pfn = low_pfn;
1299
1300         return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1301 }
1302
1303 /*
1304  * order == -1 is expected when compacting via
1305  * /proc/sys/vm/compact_memory
1306  */
1307 static inline bool is_via_compact_memory(int order)
1308 {
1309         return order == -1;
1310 }
1311
1312 static enum compact_result __compact_finished(struct zone *zone,
1313                                                 struct compact_control *cc)
1314 {
1315         unsigned int order;
1316         const int migratetype = cc->migratetype;
1317
1318         if (cc->contended || fatal_signal_pending(current))
1319                 return COMPACT_CONTENDED;
1320
1321         /* Compaction run completes if the migrate and free scanner meet */
1322         if (compact_scanners_met(cc)) {
1323                 /* Let the next compaction start anew. */
1324                 reset_cached_positions(zone);
1325
1326                 /*
1327                  * Mark that the PG_migrate_skip information should be cleared
1328                  * by kswapd when it goes to sleep. kcompactd does not set the
1329                  * flag itself as the decision to clear it should be based
1330                  * directly on an allocation request.
1331                  */
1332                 if (cc->direct_compaction)
1333                         zone->compact_blockskip_flush = true;
1334
1335                 if (cc->whole_zone)
1336                         return COMPACT_COMPLETE;
1337                 else
1338                         return COMPACT_PARTIAL_SKIPPED;
1339         }
1340
1341         if (is_via_compact_memory(cc->order))
1342                 return COMPACT_CONTINUE;
1343
1344         if (cc->finishing_block) {
1345                 /*
1346                  * We have finished the pageblock, but better check again that
1347                  * we really succeeded.
1348                  */
1349                 if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
1350                         cc->finishing_block = false;
1351                 else
1352                         return COMPACT_CONTINUE;
1353         }
1354
1355         /* Direct compactor: Is a suitable page free? */
1356         for (order = cc->order; order < MAX_ORDER; order++) {
1357                 struct free_area *area = &zone->free_area[order];
1358                 bool can_steal;
1359
1360                 /* Job done if page is free of the right migratetype */
1361                 if (!list_empty(&area->free_list[migratetype]))
1362                         return COMPACT_SUCCESS;
1363
1364 #ifdef CONFIG_CMA
1365                 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
1366                 if (migratetype == MIGRATE_MOVABLE &&
1367                         !list_empty(&area->free_list[MIGRATE_CMA]))
1368                         return COMPACT_SUCCESS;
1369 #endif
1370                 /*
1371                  * Job done if allocation would steal freepages from
1372                  * other migratetype buddy lists.
1373                  */
1374                 if (find_suitable_fallback(area, order, migratetype,
1375                                                 true, &can_steal) != -1) {
1376
1377                         /* movable pages are OK in any pageblock */
1378                         if (migratetype == MIGRATE_MOVABLE)
1379                                 return COMPACT_SUCCESS;
1380
1381                         /*
1382                          * We are stealing for a non-movable allocation. Make
1383                          * sure we finish compacting the current pageblock
1384                          * first so it is as free as possible and we won't
1385                          * have to steal another one soon. This only applies
1386                          * to sync compaction, as async compaction operates
1387                          * on pageblocks of the same migratetype.
1388                          */
1389                         if (cc->mode == MIGRATE_ASYNC ||
1390                                         IS_ALIGNED(cc->migrate_pfn,
1391                                                         pageblock_nr_pages)) {
1392                                 return COMPACT_SUCCESS;
1393                         }
1394
1395                         cc->finishing_block = true;
1396                         return COMPACT_CONTINUE;
1397                 }
1398         }
1399
1400         return COMPACT_NO_SUITABLE_PAGE;
1401 }
1402
1403 static enum compact_result compact_finished(struct zone *zone,
1404                         struct compact_control *cc)
1405 {
1406         int ret;
1407
1408         ret = __compact_finished(zone, cc);
1409         trace_mm_compaction_finished(zone, cc->order, ret);
1410         if (ret == COMPACT_NO_SUITABLE_PAGE)
1411                 ret = COMPACT_CONTINUE;
1412
1413         return ret;
1414 }
1415
1416 /*
1417  * compaction_suitable: Is this suitable to run compaction on this zone now?
1418  * Returns
1419  *   COMPACT_SKIPPED  - If there are too few free pages for compaction
1420  *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
1421  *   COMPACT_CONTINUE - If compaction should run now
1422  */
1423 static enum compact_result __compaction_suitable(struct zone *zone, int order,
1424                                         unsigned int alloc_flags,
1425                                         int classzone_idx,
1426                                         unsigned long wmark_target)
1427 {
1428         unsigned long watermark;
1429
1430         if (is_via_compact_memory(order))
1431                 return COMPACT_CONTINUE;
1432
1433         watermark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1434         /*
1435          * If watermarks for high-order allocation are already met, there
1436          * should be no need for compaction at all.
1437          */
1438         if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1439                                                                 alloc_flags))
1440                 return COMPACT_SUCCESS;
1441
1442         /*
1443          * Watermarks for order-0 must be met for compaction to be able to
1444          * isolate free pages for migration targets. This means that the
1445          * watermark and alloc_flags have to match, or be more pessimistic than
1446          * the check in __isolate_free_page(). We don't use the direct
1447          * compactor's alloc_flags, as they are not relevant for freepage
1448          * isolation. We however do use the direct compactor's classzone_idx to
1449          * skip over zones where lowmem reserves would prevent allocation even
1450          * if compaction succeeds.
1451          * For costly orders, we require low watermark instead of min for
1452          * compaction to proceed to increase its chances.
1453          */
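        /*
         * For reference: compact_gap() is defined in mm/internal.h and adds
         * headroom of roughly twice the requested allocation size on top of
         * the watermark, so enough free pages stay available as migration
         * targets while compaction makes forward progress.
         */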
1454         watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
1455                                 low_wmark_pages(zone) : min_wmark_pages(zone);
1456         watermark += compact_gap(order);
1457         if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
1458                                                 0, wmark_target))
1459                 return COMPACT_SKIPPED;
1460
1461         return COMPACT_CONTINUE;
1462 }
1463
1464 enum compact_result compaction_suitable(struct zone *zone, int order,
1465                                         unsigned int alloc_flags,
1466                                         int classzone_idx)
1467 {
1468         enum compact_result ret;
1469         int fragindex;
1470
1471         ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
1472                                     zone_page_state(zone, NR_FREE_PAGES));
1473         /*
1474          * The fragmentation index determines whether allocation failures are
1475          * due to low memory or to external fragmentation:
1476          *
1477          *  - an index of -1000 implies allocations might succeed depending on
1478          *    watermarks, but we already failed the high-order watermark check
1479          *  - an index towards 0 implies failure is due to lack of memory
1480          *  - an index towards 1000 implies failure is due to fragmentation
1481          *
1482          * Only compact if a failure would be due to fragmentation. Also
1483          * ignore fragindex for non-costly orders where the alternative to
1484          * a successful reclaim/compaction is OOM. Fragindex and the
1485          * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
1486          * excessive compaction for costly orders, but it should not be at the
1487          * expense of system stability.
1488          */
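        /*
         * Worked example with the default vm.extfrag_threshold of 500: for a
         * costly order, a zone whose fragindex is 300 is treated as failing
         * due to lack of memory and compaction is skipped below, while a
         * fragindex of 800 points to external fragmentation and compaction
         * is allowed to continue.
         */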
1489         if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
1490                 fragindex = fragmentation_index(zone, order);
1491                 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1492                         ret = COMPACT_NOT_SUITABLE_ZONE;
1493         }
1494
1495         trace_mm_compaction_suitable(zone, order, ret);
1496         if (ret == COMPACT_NOT_SUITABLE_ZONE)
1497                 ret = COMPACT_SKIPPED;
1498
1499         return ret;
1500 }
1501
1502 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
1503                 int alloc_flags)
1504 {
1505         struct zone *zone;
1506         struct zoneref *z;
1507
1508         /*
1509          * Make sure at least one zone would pass __compaction_suitable if we continue
1510          * retrying the reclaim.
1511          */
1512         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1513                                         ac->nodemask) {
1514                 unsigned long available;
1515                 enum compact_result compact_result;
1516
1517                 /*
1518                  * Do not consider all the reclaimable memory because we do not
1519                  * want to thrash just for a single high-order allocation which
1520                  * is not even guaranteed to succeed even if __compaction_suitable
1521                  * is happy about the watermark check.
1522                  */
1523                 available = zone_reclaimable_pages(zone) / order;
1524                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
1525                 compact_result = __compaction_suitable(zone, order, alloc_flags,
1526                                 ac_classzone_idx(ac), available);
1527                 if (compact_result != COMPACT_SKIPPED)
1528                         return true;
1529         }
1530
1531         return false;
1532 }
1533
1534 static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
1535 {
1536         enum compact_result ret;
1537         unsigned long start_pfn = zone->zone_start_pfn;
1538         unsigned long end_pfn = zone_end_pfn(zone);
1539         const bool sync = cc->mode != MIGRATE_ASYNC;
1540
1541         cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1542         ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1543                                                         cc->classzone_idx);
1544         /* Compaction is either unnecessary or likely to fail */
1545         if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
1546                 return ret;
1547
1548         /* huh, compaction_suitable is returning something unexpected */
1549         VM_BUG_ON(ret != COMPACT_CONTINUE);
1550
1551         /*
1552          * Clear pageblock skip if there were failures recently and compaction
1553          * is about to be retried after being deferred.
1554          */
1555         if (compaction_restarting(zone, cc->order))
1556                 __reset_isolation_suitable(zone);
1557
1558         /*
1559          * Set up to move all movable pages to the end of the zone. Use cached
1560          * information on where the scanners should start (unless we explicitly
1561          * want to compact the whole zone), but check that it is initialised
1562          * by ensuring the values are within zone boundaries.
1563          */
1564         if (cc->whole_zone) {
1565                 cc->migrate_pfn = start_pfn;
1566                 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1567         } else {
1568                 cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1569                 cc->free_pfn = zone->compact_cached_free_pfn;
1570                 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
1571                         cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
1572                         zone->compact_cached_free_pfn = cc->free_pfn;
1573                 }
1574                 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
1575                         cc->migrate_pfn = start_pfn;
1576                         zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
1577                         zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1578                 }
1579
1580                 if (cc->migrate_pfn == start_pfn)
1581                         cc->whole_zone = true;
1582         }
1583
1584         cc->last_migrated_pfn = 0;
1585
1586         trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
1587                                 cc->free_pfn, end_pfn, sync);
1588
1589         migrate_prep_local();
1590
1591         while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
1592                 int err;
1593
1594                 switch (isolate_migratepages(zone, cc)) {
1595                 case ISOLATE_ABORT:
1596                         ret = COMPACT_CONTENDED;
1597                         putback_movable_pages(&cc->migratepages);
1598                         cc->nr_migratepages = 0;
1599                         goto out;
1600                 case ISOLATE_NONE:
1601                         /*
1602                          * We haven't isolated and migrated anything, but
1603                          * there might still be unflushed migrations from
1604                          * the previous cc->order aligned block.
1605                          */
1606                         goto check_drain;
1607                 case ISOLATE_SUCCESS:
1608                         ;
1609                 }
1610
1611                 err = migrate_pages(&cc->migratepages, compaction_alloc,
1612                                 compaction_free, (unsigned long)cc, cc->mode,
1613                                 MR_COMPACTION);
1614
1615                 trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1616                                                         &cc->migratepages);
1617
1618                 /* All pages were either migrated or will be released */
1619                 cc->nr_migratepages = 0;
1620                 if (err) {
1621                         putback_movable_pages(&cc->migratepages);
1622                         /*
1623                          * migrate_pages() may return -ENOMEM when scanners meet
1624                          * and we want compact_finished() to detect it
1625                          */
1626                         if (err == -ENOMEM && !compact_scanners_met(cc)) {
1627                                 ret = COMPACT_CONTENDED;
1628                                 goto out;
1629                         }
1630                         /*
1631                          * We failed to migrate at least one page in the current
1632                          * order-aligned block, so skip the rest of it.
1633                          */
1634                         if (cc->direct_compaction &&
1635                                                 (cc->mode == MIGRATE_ASYNC)) {
1636                                 cc->migrate_pfn = block_end_pfn(
1637                                                 cc->migrate_pfn - 1, cc->order);
1638                                 /* Draining pcplists is useless in this case */
1639                                 cc->last_migrated_pfn = 0;
1640
1641                         }
1642                 }
1643
1644 check_drain:
1645                 /*
1646                  * Has the migration scanner moved away from the previous
1647                  * cc->order aligned block where we migrated from? If yes,
1648                  * flush the pages that were freed, so that they can merge and
1649                  * compact_finished() can detect immediately if allocation
1650                  * would succeed.
1651                  */
1652                 if (cc->order > 0 && cc->last_migrated_pfn) {
1653                         int cpu;
1654                         unsigned long current_block_start =
1655                                 block_start_pfn(cc->migrate_pfn, cc->order);
1656
1657                         if (cc->last_migrated_pfn < current_block_start) {
1658                                 cpu = get_cpu();
1659                                 lru_add_drain_cpu(cpu);
1660                                 drain_local_pages(zone);
1661                                 put_cpu();
1662                                 /* No more flushing until we migrate again */
1663                                 cc->last_migrated_pfn = 0;
1664                         }
1665                 }
1666
1667         }
1668
1669 out:
1670         /*
1671          * Release free pages and update where the free scanner should restart,
1672          * so we don't leave any returned pages behind in the next attempt.
1673          */
1674         if (cc->nr_freepages > 0) {
1675                 unsigned long free_pfn = release_freepages(&cc->freepages);
1676
1677                 cc->nr_freepages = 0;
1678                 VM_BUG_ON(free_pfn == 0);
1679                 /* The cached pfn is always the first in a pageblock */
1680                 free_pfn = pageblock_start_pfn(free_pfn);
1681                 /*
1682                  * Only go back, not forward. The cached pfn might already have
1683                  * been reset to the zone end in compact_finished().
1684                  */
1685                 if (free_pfn > zone->compact_cached_free_pfn)
1686                         zone->compact_cached_free_pfn = free_pfn;
1687         }
1688
1689         count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
1690         count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
1691
1692         trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
1693                                 cc->free_pfn, end_pfn, sync, ret);
1694
1695         return ret;
1696 }
1697
1698 static enum compact_result compact_zone_order(struct zone *zone, int order,
1699                 gfp_t gfp_mask, enum compact_priority prio,
1700                 unsigned int alloc_flags, int classzone_idx)
1701 {
1702         enum compact_result ret;
1703         struct compact_control cc = {
1704                 .nr_freepages = 0,
1705                 .nr_migratepages = 0,
1706                 .total_migrate_scanned = 0,
1707                 .total_free_scanned = 0,
1708                 .order = order,
1709                 .gfp_mask = gfp_mask,
1710                 .zone = zone,
1711                 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1712                                         MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
1713                 .alloc_flags = alloc_flags,
1714                 .classzone_idx = classzone_idx,
1715                 .direct_compaction = true,
1716                 .whole_zone = (prio == MIN_COMPACT_PRIORITY),
1717                 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
1718                 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
1719         };
1720         INIT_LIST_HEAD(&cc.freepages);
1721         INIT_LIST_HEAD(&cc.migratepages);
1722
1723         ret = compact_zone(zone, &cc);
1724
1725         VM_BUG_ON(!list_empty(&cc.freepages));
1726         VM_BUG_ON(!list_empty(&cc.migratepages));
1727
1728         return ret;
1729 }
1730
1731 int sysctl_extfrag_threshold = 500;
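/*
 * The threshold above is a runtime tunable handled by sysctl_extfrag_handler()
 * below, e.g. (value chosen purely for illustration):
 *
 *	echo 750 > /proc/sys/vm/extfrag_threshold
 *
 * Raising it makes more failures look like plain lack of memory, so compaction
 * of costly orders is skipped more often; lowering it favours compaction.
 */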
1732
1733 /**
1734  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1735  * @gfp_mask: The GFP mask of the current allocation
1736  * @order: The order of the current allocation
1737  * @alloc_flags: The allocation flags of the current allocation
1738  * @ac: The context of current allocation
1739  * @prio: Determines how hard direct compaction should try to succeed
1740  *
1741  * This is the main entry point for direct page compaction.
1742  */
1743 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1744                 unsigned int alloc_flags, const struct alloc_context *ac,
1745                 enum compact_priority prio)
1746 {
1747         int may_perform_io = gfp_mask & __GFP_IO;
1748         struct zoneref *z;
1749         struct zone *zone;
1750         enum compact_result rc = COMPACT_SKIPPED;
1751
1752         /*
1753          * Check if the GFP flags allow compaction - GFP_NOIO is a really
1754          * tricky context because the migration might require IO.
1755          */
1756         if (!may_perform_io)
1757                 return COMPACT_SKIPPED;
1758
1759         trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
1760
1761         /* Compact each zone in the list */
1762         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
1763                                                                 ac->nodemask) {
1764                 enum compact_result status;
1765
1766                 if (prio > MIN_COMPACT_PRIORITY
1767                                         && compaction_deferred(zone, order)) {
1768                         rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
1769                         continue;
1770                 }
1771
1772                 status = compact_zone_order(zone, order, gfp_mask, prio,
1773                                         alloc_flags, ac_classzone_idx(ac));
1774                 rc = max(status, rc);
1775
1776                 /* The allocation should succeed, stop compacting */
1777                 if (status == COMPACT_SUCCESS) {
1778                         /*
1779                          * We think the allocation will succeed in this zone,
1780                          * but it is not certain, hence the false. The caller
1781                          * will repeat this with true if allocation indeed
1782                          * succeeds in this zone.
1783                          */
1784                         compaction_defer_reset(zone, order, false);
1785
1786                         break;
1787                 }
1788
1789                 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
1790                                         status == COMPACT_PARTIAL_SKIPPED))
1791                         /*
1792                          * We think that allocation won't succeed in this zone
1793                          * so we defer compaction there. If it ends up
1794                          * succeeding after all, it will be reset.
1795                          */
1796                         defer_compaction(zone, order);
1797
1798                 /*
1799                  * We might have stopped compacting due to need_resched() in
1800                  * async compaction, or because a fatal signal was detected.
1801                  * In that case, do not try further zones.
1802                  */
1803                 if ((prio == COMPACT_PRIO_ASYNC && need_resched())
1804                                         || fatal_signal_pending(current))
1805                         break;
1806         }
1807
1808         return rc;
1809 }
1810
1812 /* Compact all zones within a node */
1813 static void compact_node(int nid)
1814 {
1815         pg_data_t *pgdat = NODE_DATA(nid);
1816         int zoneid;
1817         struct zone *zone;
1818         struct compact_control cc = {
1819                 .order = -1,
1820                 .total_migrate_scanned = 0,
1821                 .total_free_scanned = 0,
1822                 .mode = MIGRATE_SYNC,
1823                 .ignore_skip_hint = true,
1824                 .whole_zone = true,
1825                 .gfp_mask = GFP_KERNEL,
1826         };
1827
1829         for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1831                 zone = &pgdat->node_zones[zoneid];
1832                 if (!populated_zone(zone))
1833                         continue;
1834
1835                 cc.nr_freepages = 0;
1836                 cc.nr_migratepages = 0;
1837                 cc.zone = zone;
1838                 INIT_LIST_HEAD(&cc.freepages);
1839                 INIT_LIST_HEAD(&cc.migratepages);
1840
1841                 compact_zone(zone, &cc);
1842
1843                 VM_BUG_ON(!list_empty(&cc.freepages));
1844                 VM_BUG_ON(!list_empty(&cc.migratepages));
1845         }
1846 }
1847
1848 /* Compact all nodes in the system */
1849 static void compact_nodes(void)
1850 {
1851         int nid;
1852
1853         /* Flush pending updates to the LRU lists */
1854         lru_add_drain_all();
1855
1856         for_each_online_node(nid)
1857                 compact_node(nid);
1858 }
1859
1860 /* The written value is actually unused; all memory is compacted */
1861 int sysctl_compact_memory;
1862
1863 /*
1864  * This is the entry point for compacting all nodes via
1865  * /proc/sys/vm/compact_memory
1866  */
1867 int sysctl_compaction_handler(struct ctl_table *table, int write,
1868                         void __user *buffer, size_t *length, loff_t *ppos)
1869 {
1870         if (write)
1871                 compact_nodes();
1872
1873         return 0;
1874 }
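/*
 * From userspace the handler above is typically reached with:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * As noted above, the written value is ignored; any write triggers a full
 * compaction of every online node.
 */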
1875
1876 int sysctl_extfrag_handler(struct ctl_table *table, int write,
1877                         void __user *buffer, size_t *length, loff_t *ppos)
1878 {
1879         proc_dointvec_minmax(table, write, buffer, length, ppos);
1880
1881         return 0;
1882 }
1883
1884 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
1885 static ssize_t sysfs_compact_node(struct device *dev,
1886                         struct device_attribute *attr,
1887                         const char *buf, size_t count)
1888 {
1889         int nid = dev->id;
1890
1891         if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1892                 /* Flush pending updates to the LRU lists */
1893                 lru_add_drain_all();
1894
1895                 compact_node(nid);
1896         }
1897
1898         return count;
1899 }
1900 static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
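/*
 * The attribute above is exposed as a per-node sysfs file, so a single node
 * can be compacted from userspace with, for example:
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */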
1901
1902 int compaction_register_node(struct node *node)
1903 {
1904         return device_create_file(&node->dev, &dev_attr_compact);
1905 }
1906
1907 void compaction_unregister_node(struct node *node)
1908 {
1909         return device_remove_file(&node->dev, &dev_attr_compact);
1910 }
1911 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1912
1913 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
1914 {
1915         return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
1916 }
1917
1918 static bool kcompactd_node_suitable(pg_data_t *pgdat)
1919 {
1920         int zoneid;
1921         struct zone *zone;
1922         enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
1923
1924         for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
1925                 zone = &pgdat->node_zones[zoneid];
1926
1927                 if (!populated_zone(zone))
1928                         continue;
1929
1930                 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
1931                                         classzone_idx) == COMPACT_CONTINUE)
1932                         return true;
1933         }
1934
1935         return false;
1936 }
1937
1938 static void kcompactd_do_work(pg_data_t *pgdat)
1939 {
1940         /*
1941          * With no special task, compact all zones so that a page of the
1942          * requested order is allocatable.
1943          */
1944         int zoneid;
1945         struct zone *zone;
1946         struct compact_control cc = {
1947                 .order = pgdat->kcompactd_max_order,
1948                 .total_migrate_scanned = 0,
1949                 .total_free_scanned = 0,
1950                 .classzone_idx = pgdat->kcompactd_classzone_idx,
1951                 .mode = MIGRATE_SYNC_LIGHT,
1952                 .ignore_skip_hint = false,
1953                 .gfp_mask = GFP_KERNEL,
1954         };
1955         trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
1956                                                         cc.classzone_idx);
1957         count_compact_event(KCOMPACTD_WAKE);
1958
1959         for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
1960                 int status;
1961
1962                 zone = &pgdat->node_zones[zoneid];
1963                 if (!populated_zone(zone))
1964                         continue;
1965
1966                 if (compaction_deferred(zone, cc.order))
1967                         continue;
1968
1969                 if (compaction_suitable(zone, cc.order, 0, zoneid) !=
1970                                                         COMPACT_CONTINUE)
1971                         continue;
1972
1973                 cc.nr_freepages = 0;
1974                 cc.nr_migratepages = 0;
1975                 cc.total_migrate_scanned = 0;
1976                 cc.total_free_scanned = 0;
1977                 cc.zone = zone;
1978                 INIT_LIST_HEAD(&cc.freepages);
1979                 INIT_LIST_HEAD(&cc.migratepages);
1980
1981                 if (kthread_should_stop())
1982                         return;
1983                 status = compact_zone(zone, &cc);
1984
1985                 if (status == COMPACT_SUCCESS) {
1986                         compaction_defer_reset(zone, cc.order, false);
1987                 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
1988                         /*
1989                          * Buddy pages may become stranded on pcps that could
1990                          * otherwise coalesce on the zone's free area for
1991                          * order >= cc.order.  This is ratelimited by the
1992                          * upcoming deferral.
1993                          */
1994                         drain_all_pages(zone);
1995
1996                         /*
1997                          * We use sync migration mode here, so we defer like
1998                          * sync direct compaction does.
1999                          */
2000                         defer_compaction(zone, cc.order);
2001                 }
2002
2003                 count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
2004                                      cc.total_migrate_scanned);
2005                 count_compact_events(KCOMPACTD_FREE_SCANNED,
2006                                      cc.total_free_scanned);
2007
2008                 VM_BUG_ON(!list_empty(&cc.freepages));
2009                 VM_BUG_ON(!list_empty(&cc.migratepages));
2010         }
2011
2012         /*
2013          * Regardless of success, we are done until woken up next time. But
2014          * remember the requested order/classzone_idx in case it was higher or
2015          * tighter than our current ones.
2016          */
2017         if (pgdat->kcompactd_max_order <= cc.order)
2018                 pgdat->kcompactd_max_order = 0;
2019         if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
2020                 pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2021 }
2022
2023 void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
2024 {
2025         if (!order)
2026                 return;
2027
2028         if (pgdat->kcompactd_max_order < order)
2029                 pgdat->kcompactd_max_order = order;
2030
2031         if (pgdat->kcompactd_classzone_idx > classzone_idx)
2032                 pgdat->kcompactd_classzone_idx = classzone_idx;
2033
2034         /*
2035          * Pairs with implicit barrier in wait_event_freezable()
2036          * such that wakeups are not missed.
2037          */
2038         if (!wq_has_sleeper(&pgdat->kcompactd_wait))
2039                 return;
2040
2041         if (!kcompactd_node_suitable(pgdat))
2042                 return;
2043
2044         trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
2045                                                         classzone_idx);
2046         wake_up_interruptible(&pgdat->kcompactd_wait);
2047 }
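/*
 * Rough usage sketch (the actual call sites live in the reclaim code and are
 * not shown here): kswapd is expected to nudge kcompactd once it has
 * reclaimed order-0 pages on behalf of a pending high-order request, e.g.
 *
 *	wakeup_kcompactd(pgdat, order, classzone_idx);
 *
 * kcompactd_do_work() then compacts the node's zones up to classzone_idx.
 */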
2048
2049 /*
2050  * The background compaction daemon, started as a kernel thread
2051  * from the init process.
2052  */
2053 static int kcompactd(void *p)
2054 {
2055         pg_data_t *pgdat = (pg_data_t *)p;
2056         struct task_struct *tsk = current;
2057
2058         const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
2059
2060         if (!cpumask_empty(cpumask))
2061                 set_cpus_allowed_ptr(tsk, cpumask);
2062
2063         set_freezable();
2064
2065         pgdat->kcompactd_max_order = 0;
2066         pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
2067
2068         while (!kthread_should_stop()) {
2069                 trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
2070                 wait_event_freezable(pgdat->kcompactd_wait,
2071                                 kcompactd_work_requested(pgdat));
2072
2073                 kcompactd_do_work(pgdat);
2074         }
2075
2076         return 0;
2077 }
2078
2079 /*
2080  * This kcompactd start function will be called by init and node-hot-add.
2081  * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
2082  */
2083 int kcompactd_run(int nid)
2084 {
2085         pg_data_t *pgdat = NODE_DATA(nid);
2086         int ret = 0;
2087
2088         if (pgdat->kcompactd)
2089                 return 0;
2090
2091         pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
2092         if (IS_ERR(pgdat->kcompactd)) {
2093                 pr_err("Failed to start kcompactd on node %d\n", nid);
2094                 ret = PTR_ERR(pgdat->kcompactd);
2095                 pgdat->kcompactd = NULL;
2096         }
2097         return ret;
2098 }
2099
2100 /*
2101  * Called by memory hotplug when all memory in a node is offlined. Caller must
2102  * hold mem_hotplug_begin/end().
2103  */
2104 void kcompactd_stop(int nid)
2105 {
2106         struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
2107
2108         if (kcompactd) {
2109                 kthread_stop(kcompactd);
2110                 NODE_DATA(nid)->kcompactd = NULL;
2111         }
2112 }
2113
2114 /*
2115  * It's optimal to keep kcompactd on the same CPUs as its node's memory, but
2116  * that is not required for correctness. So if the last cpu in a node goes
2117  * away, kcompactd may run anywhere; as the first one comes back, its cpu
2118  * bindings are restored.
2119  */
2120 static int kcompactd_cpu_online(unsigned int cpu)
2121 {
2122         int nid;
2123
2124         for_each_node_state(nid, N_MEMORY) {
2125                 pg_data_t *pgdat = NODE_DATA(nid);
2126                 const struct cpumask *mask;
2127
2128                 mask = cpumask_of_node(pgdat->node_id);
2129
2130                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
2131                         /* One of our CPUs online: restore mask */
2132                         set_cpus_allowed_ptr(pgdat->kcompactd, mask);
2133         }
2134         return 0;
2135 }
2136
2137 static int __init kcompactd_init(void)
2138 {
2139         int nid;
2140         int ret;
2141
2142         ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
2143                                         "mm/compaction:online",
2144                                         kcompactd_cpu_online, NULL);
2145         if (ret < 0) {
2146                 pr_err("kcompactd: failed to register hotplug callbacks.\n");
2147                 return ret;
2148         }
2149
2150         for_each_node_state(nid, N_MEMORY)
2151                 kcompactd_run(nid);
2152         return 0;
2153 }
2154 subsys_initcall(kcompactd_init)
2155
2156 #endif /* CONFIG_COMPACTION */