// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
        local_lock_t lock;
        struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following pagevecs are grouped together because they are all protected
 * by disabling preemption (interrupts remain enabled).
 */
struct lru_pvecs {
        local_lock_t lock;
        struct pagevec lru_add;
        struct pagevec lru_deactivate_file;
        struct pagevec lru_deactivate;
        struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
        struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                pg_data_t *pgdat = page_pgdat(page);
                struct lruvec *lruvec;
                unsigned long flags;

                spin_lock_irqsave(&pgdat->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        }
        __ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        mem_cgroup_uncharge(page);
        free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because a hugetlb page never has PageLRU set
         * (it is never added to any LRU list) and no memcg routines should
         * be called for hugetlb (it has a separate hugetlb_cgroup).
         */
        if (!PageHuge(page))
                __page_cache_release(page);
        destroy_compound_page(page);
}

void __put_page(struct page *page)
{
        if (is_zone_device_page(page)) {
                put_dev_pagemap(page->pgmap);

                /*
                 * The page belongs to the device that created pgmap. Do
                 * not return it to page allocator.
                 */
                return;
        }

        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
                __put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = lru_to_page(pages);
                list_del(&victim->lru);
                put_page(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
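
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): a caller threads pages onto a local list via page->lru and then
 * hands the whole list back in a single call.
 */
static __maybe_unused void example_put_pages_list(void)
{
        LIST_HEAD(pages);
        int i;

        for (i = 0; i < 4; i++) {
                struct page *page = alloc_page(GFP_KERNEL);

                if (!page)
                        break;
                list_add(&page->lru, &pages);
        }
        put_pages_list(&pages);         /* drops the list's references */
}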

/**
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:       An array of struct kvec structures
 * @nr_segs:    number of segments to pin
 * @write:      pinning for read/write, currently ignored
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If @nr_segs is 0 or negative, returns 0. Each page returned
 * must be released with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                struct page **pages)
{
        int seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;

                pages[seg] = kmap_to_page(kiov[seg].iov_base);
                get_page(pages[seg]);
        }

        return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/**
 * get_kernel_page() - pin a kernel page in memory
 * @start:      starting kernel address
 * @write:      pinning for read/write, currently ignored
 * @pages:      array that receives pointer to the page pinned.
 *              Must have space for at least one page.
 *
 * Returns 1 if the page was pinned, 0 if not. The page returned must be
 * released with a put_page() call when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
        const struct kvec kiov = {
                .iov_base = (void *)start,
                .iov_len = PAGE_SIZE
        };

        return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
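
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): pinning the page backing a page-aligned, page-sized kernel
 * buffer and dropping the reference when done.
 */
static __maybe_unused int example_pin_kernel_page(void *buf)
{
        struct page *page;

        if (get_kernel_page((unsigned long)buf, 0, &page) != 1)
                return -EFAULT;
        /* ... use page ... */
        put_page(page);
        return 0;
}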

static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
        void *arg)
{
        int i;
        struct pglist_data *pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);

                if (pagepgdat != pgdat) {
                        if (pgdat)
                                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
                        pgdat = pagepgdat;
                        spin_lock_irqsave(&pgdat->lru_lock, flags);
                }

                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                (*move_fn)(page, lruvec, arg);
        }
        if (pgdat)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}
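
/*
 * Illustrative sketch of the move_fn contract expected by
 * pagevec_lru_move_fn() (hypothetical callback, not one of the callbacks
 * used below): each page arrives with the owning pgdat's lru_lock held,
 * so LRU state may be inspected or modified directly.
 */
static __maybe_unused void example_count_lru_fn(struct page *page,
                struct lruvec *lruvec, void *arg)
{
        int *nr_on_lru = arg;

        if (PageLRU(page))
                (*nr_on_lru) += thp_nr_pages(page);
}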

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageUnevictable(page)) {
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
                (*pgmoved) += thp_nr_pages(page);
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                get_page(page);
                local_lock_irqsave(&lru_rotate.lock, flags);
                pvec = this_cpu_ptr(&lru_rotate.pvec);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
        do {
                unsigned long lrusize;

                /* Record cost event */
                if (file)
                        lruvec->file_cost += nr_pages;
                else
                        lruvec->anon_cost += nr_pages;

                /*
                 * Decay previous events
                 *
                 * Because workloads change over time (and to avoid
                 * overflow) we keep these statistics as a floating
                 * average, which ends up weighing recent refaults
                 * more than old ones.
                 */
                lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
                          lruvec_page_state(lruvec, NR_ACTIVE_FILE);

                if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
        } while ((lruvec = parent_lruvec(lruvec)));
}
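
/*
 * Worked example for the decay above (illustrative numbers): with
 * lrusize = 1024 pages, both costs are halved once file_cost + anon_cost
 * exceeds 256.  Repeated halving decays old events geometrically, so
 * recent refaults dominate the file/anon balance.
 */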

void lru_note_cost_page(struct page *page)
{
        lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
                      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);

                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
                                     nr_pages);
        }
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
        return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
        page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.activate_page);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                local_unlock(&lru_pvecs.lock);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
        pg_data_t *pgdat = page_pgdat(page);

        page = compound_head(page);
        spin_lock_irq(&pgdat->lru_lock);
        __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
        spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
        struct pagevec *pvec;
        int i;

        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);

        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
         * the local pagevec is examined as a !PageLRU page could be in the
         * process of being released, reclaimed, migrated or on a remote
         * pagevec that is currently being drained. Furthermore, marking
         * a remote pagevec's page PageActive potentially hits a race where
         * a page is marked PageActive just after it is added to the inactive
         * list causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
                struct page *pagevec_page = pvec->pages[i];

                if (pagevec_page == page) {
                        SetPageActive(page);
                        break;
                }
        }

        local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 *
 * When a newly allocated page is not yet visible to others, it is safe to
 * use the non-atomic __SetPageReferenced(page) in place of
 * mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
        page = compound_head(page);

        if (!PageReferenced(page)) {
                SetPageReferenced(page);
        } else if (PageUnevictable(page)) {
                /*
                 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
                 * this list is never rotated or maintained, so marking an
                 * unevictable page accessed has no effect.
                 */
        } else if (!PageActive(page)) {
                /*
                 * If the page is on the LRU, queue it for activation via
                 * lru_pvecs.activate_page. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
                workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
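
/*
 * Illustrative sketch of the state machine documented above (hypothetical
 * helper, not part of the original file): two consecutive accesses
 * promote an inactive page to the active list.
 */
static __maybe_unused void example_touch_twice(struct page *page)
{
        mark_page_accessed(page); /* inactive,unreferenced -> inactive,referenced */
        mark_page_accessed(page); /* inactive,referenced -> active,unreferenced */
}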

/**
 * lru_cache_add - add a page to the LRU
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
        struct pagevec *pvec;

        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);

        get_page(page);
        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
        local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);
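
/*
 * Illustrative note on the reference counting above (hypothetical helper,
 * not part of the original file): the caller keeps its own reference,
 * e.g. from the page cache or a mapping, while lru_cache_add() takes a
 * second, temporary one that release_pages() drops at drain time.
 */
static __maybe_unused void example_add_to_lru(struct page *page)
{
        /* The caller's reference keeps the page alive across the drain. */
        lru_cache_add(page);
}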

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
                                         struct vm_area_struct *vma)
{
        bool unevictable;

        VM_BUG_ON_PAGE(PageLRU(page), page);

        unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
        if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);
                /*
                 * We use the irq-unsafe __mod_zone_page_state because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held (a spinlock), which implies preemption is
                 * disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
                count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
        }
        lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty/under writeback, it can be
 * reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, as this is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                              void *arg)
{
        int lru;
        bool active;
        int nr_pages = thp_nr_pages(page);

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * Setting PG_reclaim can race with end_page_writeback(),
                 * which can confuse readahead.  But the race window is
                 * _really_ small, and it's a non-critical problem.
                 */
                add_page_to_lru_list(page, lruvec, lru);
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it sat in the pagevec,
                 * so move the page to the tail of the inactive list.
                 */
                add_page_to_lru_list_tail(page, lruvec, lru);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
                ClearPageReferenced(page);
                add_page_to_lru_list(page, lruvec, lru);

                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
                 * Lazyfree pages are clean anonymous pages.  They have the
                 * PG_swapbacked flag cleared, to distinguish them from normal
                 * anonymous pages.
                 */
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
                                     nr_pages);
        }
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);

        pvec = &per_cpu(lru_rotate.pvec, cpu);
        /* Disabling interrupts below acts as a compiler barrier. */
        if (data_race(pagevec_count(pvec))) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_lock_irqsave(&lru_rotate.lock, flags);
                pagevec_move_tail(pvec);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }

        pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

        pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

        pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as one using
         * mprotect), deactivating unevictable pages to accelerate reclaim
         * is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                local_unlock(&lru_pvecs.lock);
        }
}

/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                local_unlock(&lru_pvecs.lock);
        }
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
                local_unlock(&lru_pvecs.lock);
        }
}

void lru_add_drain(void)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
        /*
         * lru_drain_gen - Global pages generation number
         *
         * (A) Definition: global lru_drain_gen = x implies that all generations
         *     0 < n <= x are already *scheduled* for draining.
         *
         * This is an optimization for the highly-contended use case where a
         * user space workload keeps constantly generating a flow of pages for
         * each CPU.
         */
        static unsigned int lru_drain_gen;
        static struct cpumask has_work;
        static DEFINE_MUTEX(lock);
        unsigned cpu, this_gen;

        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
         * initialized.
         */
        if (WARN_ON(!mm_percpu_wq))
                return;

        /*
         * Guarantee pagevec counter stores visible by this CPU are visible to
         * other CPUs before loading the current drain generation.
         */
        smp_mb();

        /*
         * (B) Locally cache global LRU draining generation number
         *
         * The read barrier ensures that the counter is loaded before the mutex
         * is taken. It pairs with smp_mb() inside the mutex critical section
         * at (D).
         */
        this_gen = smp_load_acquire(&lru_drain_gen);

        mutex_lock(&lock);

        /*
         * (C) Exit the draining operation if a newer generation, from another
         * lru_add_drain_all(), was already scheduled for draining. Check (A).
         */
        if (unlikely(this_gen != lru_drain_gen))
                goto done;

        /*
         * (D) Increment global generation number
         *
         * Pairs with smp_load_acquire() at (B), outside of the critical
         * section. Use a full memory barrier to guarantee that the new global
         * drain generation number is stored before loading pagevec counters.
         *
         * This pairing must be done here, before the for_each_online_cpu loop
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
         * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
         *
         * If the paired barrier is done at any later step, e.g. after the
         * loop, CPU #x will just exit at (C) and miss flushing out all of its
         * added pages.
         */
        WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
        smp_mb();

        cpumask_clear(&has_work);
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
                    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
        mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
        lru_add_drain();
}
#endif /* CONFIG_SMP */

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct pglist_data *locked_pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags;
        unsigned int lock_batch;

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same pgdat. The lock is held only if pgdat != NULL.
                 */
                if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
                        spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                        locked_pgdat = NULL;
                }

                page = compound_head(page);
                if (is_huge_zero_page(page))
                        continue;

                if (is_zone_device_page(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                       flags);
                                locked_pgdat = NULL;
                        }
                        /*
                         * ZONE_DEVICE pages that return 'false' from
                         * page_is_devmap_managed() do not require special
                         * processing, and instead, expect a call to
                         * put_page_testzero().
                         */
                        if (page_is_devmap_managed(page)) {
                                put_devmap_managed_page(page);
                                continue;
                        }
                        if (put_page_testzero(page))
                                put_dev_pagemap(page->pgmap);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageCompound(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                                locked_pgdat = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct pglist_data *pgdat = page_pgdat(page);

                        if (pgdat != locked_pgdat) {
                                if (locked_pgdat)
                                        spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                                        flags);
                                lock_batch = 0;
                                locked_pgdat = pgdat;
                                spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
                        }

                        lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
                        VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }

                __ClearPageWaiters(page);

                list_add(&page->lru, &pages_to_free);
        }
        if (locked_pgdat)
                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
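
/*
 * Illustrative sketch pairing get_kernel_pages() with release_pages()
 * (hypothetical helper, not part of the original file): one batched call
 * drops all the pinned references instead of nr_segs put_page() calls.
 */
static __maybe_unused int example_pin_and_release(const struct kvec *kiov,
                                                  int nr_segs)
{
        struct page *pages[8];
        int nr;

        if (nr_segs < 0 || nr_segs > ARRAY_SIZE(pages))
                return -EINVAL;

        nr = get_kernel_pages(kiov, nr_segs, 0, pages);
        /* ... use pages[0..nr-1] ... */
        release_pages(pages, nr);       /* batched put_page() */
        return nr;
}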

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        if (!pvec->percpu_pvec_drained) {
                lru_add_drain();
                pvec->percpu_pvec_drained = true;
        }
        release_pages(pvec->pages, pagevec_count(pvec));
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
                       struct lruvec *lruvec, struct list_head *list)
{
        VM_BUG_ON_PAGE(!PageHead(page), page);
        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

        if (!list)
                SetPageLRU(page_tail);

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else if (list) {
                /* page reclaim is reclaiming a huge page */
                get_page(page_tail);
                list_add_tail(&page_tail->lru, list);
        } else {
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Put page_tail on the list at the correct position
                 * so they all end up in order.
                 */
                add_page_to_lru_list_tail(page_tail, lruvec,
                                          page_lru(page_tail));
        }
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
        int nr_pages = thp_nr_pages(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        /*
         * Page becomes evictable in two ways:
         * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
         * 2) Before acquiring LRU lock to put the page to correct LRU and then
         *   a) do PageLRU check with lock [check_move_unevictable_pages]
         *   b) do PageLRU check before lock [clear_page_mlock]
         *
         * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
         * the following strict ordering:
         *
         * #0: __pagevec_lru_add_fn             #1: clear_page_mlock
         *
         * SetPageLRU()                         TestClearPageMlocked()
         * smp_mb() // explicit ordering        // above provides strict
         *                                      // ordering
         * PageMlocked()                        PageLRU()
         *
         *
         * if '#1' does not observe setting of PG_lru by '#0' and fails
         * isolation, the explicit barrier will make sure that the
         * page_evictable check will put the page on the correct LRU.
         * Without smp_mb(), SetPageLRU can be reordered after the PageMlocked
         * check, which can cause '#1' to fail the isolation of the page whose
         * Mlocked bit is cleared (#0 is also looking at the same page), and
         * the evictable page will be stranded on an unevictable LRU.
         */
        SetPageLRU(page);
        smp_mb__after_atomic();

        if (page_evictable(page)) {
                lru = page_lru(page);
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                lru = LRU_UNEVICTABLE;
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        add_page_to_lru_list(page, lruvec, lru);
        trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:       Where the resulting entries are placed
 * @mapping:    The address_space to search
 * @start:      The starting entry index
 * @nr_entries: The maximum number of entries
 * @indices:    The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * Only one subpage of a Transparent Huge Page is returned in one call:
 * allowing truncate_inode_pages_range() to evict the whole THP without
 * cycling through a pagevec of extra references.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                struct address_space *mapping,
                                pgoff_t start, unsigned nr_entries,
                                pgoff_t *indices)
{
        pvec->nr = find_get_entries(mapping, start, nr_entries,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:       The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!xa_is_value(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}
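
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * file): entries returned by pagevec_lookup_entries() may include shadow
 * (value) entries; pruning them leaves a page-only pagevec that ordinary
 * helpers can consume.  The caller still owns the page references.
 */
static __maybe_unused unsigned example_lookup_pages_only(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t start, pgoff_t *indices)
{
        pagevec_lookup_entries(pvec, mapping, start, PAGEVEC_SIZE, indices);
        pagevec_remove_exceptionals(pvec);      /* drop xa_is_value() entries */
        return pagevec_count(pvec);
}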

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @end:        The final page index
 *
 * pagevec_lookup_range() will search for and return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.  The function
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
        pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
                                        pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
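
/*
 * Illustrative sketch of the usual lookup loop (hypothetical helper, not
 * part of the original file): @start is advanced by the helper, and a
 * result smaller than PAGEVEC_SIZE ends the walk.
 */
static __maybe_unused unsigned example_count_range(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t start = 0;
        unsigned nr, total = 0;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range(&pvec, mapping, &start,
                                          (pgoff_t)-1))) {
                total += nr;            /* pvec.pages[0..nr-1] are valid here */
                pagevec_release(&pvec); /* drop the references we were given */
        }
        return total;
}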

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                                        PAGEVEC_SIZE, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag, unsigned max_pages)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}
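
/*
 * Worked example (illustrative): with 4 KiB pages, PAGE_SHIFT is 12, so
 * totalram_pages() >> 8 converts the page count into MiB.  page_cluster = 3
 * batches up to 1 << 3 = 8 pages per swap cluster; machines with less than
 * 16 MiB use 1 << 2 = 4.
 */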

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
        int count;

        if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
                return;

        count = page_ref_dec_return(page);

        /*
         * devmap page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
        if (count == 1)
                free_devmap_managed_page(page);
        else if (!count)
                __put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif