/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

static struct backing_dev_info swap_backing_dev_info = {
        .name           = "swap",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
        [0 ... MAX_SWAPFILES - 1] = {
                .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
                .i_mmap_writable = ATOMIC_INIT(0),
                .a_ops          = &swap_aops,
                .backing_dev_info = &swap_backing_dev_info,
        }
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

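/*
 * Total number of pages currently held in the swap caches of all swap
 * areas (swapper_spaces[] keeps one address_space per swap type).
 */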
unsigned long total_swapcache_pages(void)
{
        int i;
        unsigned long ret = 0;

        for (i = 0; i < MAX_SWAPFILES; i++)
                ret += swapper_spaces[i].nrpages;
        return ret;
}

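/*
 * Recent swapin readahead hits, consumed by swapin_nr_pages() to size
 * the next readahead window; bumped in lookup_swap_cache() when a
 * readahead page is actually faulted in.
 */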
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

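/*
 * Print current swap cache statistics to the kernel log, e.g. as part
 * of a low-memory report.
 */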
void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_cache_get(page);
        SetPageSwapCache(page);
        set_page_private(page, entry.val);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        error = radix_tree_insert(&address_space->page_tree,
                                        entry.val, page);
        if (likely(!error)) {
                address_space->nrpages++;
                __inc_zone_page_state(page, NR_FILE_PAGES);
                INC_CACHE_INFO(add_total);
        }
        spin_unlock_irq(&address_space->tree_lock);

        if (unlikely(error)) {
                /*
                 * Only the context which has set SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache(), so add_to_swap_cache()
                 * can't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page, 0UL);
                ClearPageSwapCache(page);
                page_cache_release(page);
        }

        return error;
}


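/*
 * add_to_swap_cache - wrapper around __add_to_swap_cache() which preloads
 * the radix tree (per gfp_mask) so that the insertion under tree_lock does
 * not need to allocate.
 */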
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload(gfp_mask);
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        radix_tree_delete(&address_space->page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        address_space->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to put split tail pages on, should @page be a THP that must
 *        be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page();
        if (!entry.val)
                return 0;

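        /*
         * A transparent huge page cannot enter the swap cache as one
         * unit; split it first, putting the tail pages on @list.  If the
         * split fails, give the swap slot back and bail out.
         */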
        if (unlikely(PageTransHuge(page)))
                if (unlikely(split_huge_page_to_list(page, list))) {
                        swapcache_free(entry);
                        return 0;
                }

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache and mark it dirty
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

        if (!err) {     /* Success */
                SetPageDirty(page);
                return 1;
        } else {        /* -ENOMEM radix-tree allocation failure */
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
                return 0;
        }
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        spin_lock_irq(&address_space->tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);

        swapcache_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

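        /*
         * Drain this CPU's lru pagevecs first: a pagevec holds a page
         * reference which would otherwise keep release_pages() below
         * from actually freeing the pages.
         */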
        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), entry.val);

        if (page) {
                INC_CACHE_INFO(find_success);
                if (TestClearPageReadahead(page))
                        atomic_inc(&swapin_readahead_hits);
        }

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swap_address_space(entry),
                                        entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Call radix_tree_maybe_preload() while we can still wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet, while
                         * the other end is scheduled away waiting on discard
                         * I/O completion at scan_swap_map().
                         *
                         * In order to avoid turning this transitory state
                         * into a permanent loop around this -EEXIST case
                         * if !CONFIG_PREEMPT and the I/O completion happens
                         * to be waiting on the CPU waitqueue where we are now
                         * busy looping, we just conditionally invoke the
                         * scheduler here, if there are some more important
                         * tasks to run.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __set_page_locked(new_page);
                SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        swap_readpage(new_page);
                        return new_page;
                }
                radix_tree_preload_end();
                ClearPageSwapBacked(new_page);
                __clear_page_locked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                swapcache_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

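/*
 * Decide how many pages to read around a swapin fault: start from the
 * number of readahead hits seen since the last fault, round up to a
 * power of two, never shrink by more than half per call, and cap the
 * result at 1 << page_cluster.
 */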
static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int pages, max_pages, last_ra;
        static atomic_t last_readahead_pages;

        max_pages = 1 << ACCESS_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
                prev_offset = offset;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = atomic_read(&last_readahead_pages) / 2;
        if (pages < last_ra)
                pages = last_ra;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the mmap_sem of vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct blk_plug plug;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        continue;
                if (offset != entry_offset)
                        SetPageReadahead(page);
                page_cache_release(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}