diff --git a/mm/swap_state.c b/mm/swap_state.c
index 67daecb6031ad62022fbe7aa7fe9ea7b027dd3cf..ec42f01a8d02669fb5bd2c73dcffefb5bb619f6c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
+#include <linux/swapops.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -51,29 +52,26 @@ static struct {
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
-       unsigned long noent_race;
-       unsigned long exist_race;
 } swap_cache_info;
 
 void show_swap_cache_info(void)
 {
-       printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+       printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
-               swap_cache_info.find_success, swap_cache_info.find_total,
-               swap_cache_info.noent_race, swap_cache_info.exist_race);
+               swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
 /*
- * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
-                              gfp_t gfp_mask)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
        int error;
 
+       BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
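One behavioural note on this hunk: the old __add_to_swap_cache() took the
page lock itself (SetPageLocked on success), whereas the renamed
add_to_swap_cache() asserts PageLocked and leaves locking to the caller.
A minimal sketch of the resulting caller pattern, modelled on the
read_swap_cache_async() hunk further down, assuming a freshly allocated
page that nobody else can see yet:

	SetPageLocked(new_page);	/* new page: lock cannot be contended */
	err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
	if (err) {
		/* nobody else saw the page, so a plain clear is safe */
		ClearPageLocked(new_page);
		swap_free(entry);	/* drop the swap_duplicate() reference */
	}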
@@ -83,11 +81,11 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
-                       SetPageLocked(page);
                        SetPageSwapCache(page);
                        set_page_private(page, entry.val);
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+                       INC_CACHE_INFO(add_total);
                }
                write_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
@@ -95,28 +93,6 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
        return error;
 }
 
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
-{
-       int error;
-
-       if (!swap_duplicate(entry)) {
-               INC_CACHE_INFO(noent_race);
-               return -ENOENT;
-       }
-       error = __add_to_swap_cache(page, entry, GFP_KERNEL);
-       /*
-        * Anon pages are already on the LRU, we don't run lru_cache_add here.
-        */
-       if (error) {
-               swap_free(entry);
-               if (error == -EEXIST)
-                       INC_CACHE_INFO(exist_race);
-               return error;
-       }
-       INC_CACHE_INFO(add_total);
-       return 0;
-}
-
 /*
  * This must be called only on pages that have
  * been verified to be in the swap cache.
@@ -149,6 +125,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
        int err;
 
        BUG_ON(!PageLocked(page));
+       BUG_ON(!PageUptodate(page));
 
        for (;;) {
                entry = get_swap_page();
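The new BUG_ON pairs with the removal of SetPageUptodate() from the
success path in the next hunk: add_to_swap() now requires the caller to
hand it a page whose contents are already valid. For the anonymous pages
vmscan feeds it that always holds, so the caller side stays as simple as
this illustrative sketch of a shrink_page_list-style caller:

	if (PageAnon(page) && !PageSwapCache(page)) {
		/* page is locked and, being anon, always uptodate */
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;	/* no slot or no memory */
	}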
@@ -166,18 +143,15 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
                /*
                 * Add it to the swap cache and mark it dirty
                 */
-               err = __add_to_swap_cache(page, entry,
+               err = add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
 
                switch (err) {
                case 0:                         /* Success */
-                       SetPageUptodate(page);
                        SetPageDirty(page);
-                       INC_CACHE_INFO(add_total);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
-                       INC_CACHE_INFO(exist_race);
                        swap_free(entry);
                        continue;
                default:
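With the stats accounting moved into add_to_swap_cache(), the loop above
reads more simply: allocate a slot, try to install the page, free the slot
and retry on -EEXIST, give up otherwise. Condensed into equivalent control
flow (a sketch, not a suggested rewrite):

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;		/* swap is full */
		err = add_to_swap_cache(page, entry,
				gfp_mask | __GFP_NOMEMALLOC | __GFP_NOWARN);
		if (!err) {
			SetPageDirty(page);	/* must reach swap */
			return 1;
		}
		swap_free(entry);		/* slot lost the race */
		if (err != -EEXIST)
			return 0;		/* -ENOMEM: give up */
	}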
@@ -208,40 +182,6 @@ void delete_from_swap_cache(struct page *page)
        page_cache_release(page);
 }
 
-/*
- * Strange swizzling function only for use by shmem_writepage
- */
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
-       int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
-       if (!err) {
-               remove_from_page_cache(page);
-               page_cache_release(page);       /* pagecache ref */
-               if (!swap_duplicate(entry))
-                       BUG();
-               SetPageDirty(page);
-               INC_CACHE_INFO(add_total);
-       } else if (err == -EEXIST)
-               INC_CACHE_INFO(exist_race);
-       return err;
-}
-
-/*
- * Strange swizzling function for shmem_getpage (and shmem_unuse)
- */
-int move_from_swap_cache(struct page *page, unsigned long index,
-               struct address_space *mapping)
-{
-       int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
-       if (!err) {
-               delete_from_swap_cache(page);
-               /* shift page from clean_pages to dirty_pages list */
-               ClearPageDirty(page);
-               set_page_dirty(page);
-       }
-       return err;
-}
-
 /* 
  * If we are the only user, then try to free up the swap cache. 
  * 
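With the two swizzling helpers gone, shmem presumably open-codes the moves
itself using add_to_swap_cache() and add_to_page_cache() directly. For the
writepage direction, a rough reconstruction from the removed
move_to_swap_cache() body — not the actual shmem code — would be:

	/* page cache -> swap cache, page already locked by writepage */
	if (add_to_swap_cache(page, entry, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* drop the pagecache ref */
		if (!swap_duplicate(entry))	/* ref for shmem's swp entry */
			BUG();
		SetPageDirty(page);		/* make sure it gets written */
	}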
@@ -314,7 +254,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
  * A failure return means that either the page allocation failed or that
  * the swap entry is no longer in use.
  */
-struct page *read_swap_cache_async(swp_entry_t entry,
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *found_page, *new_page = NULL;
@@ -334,23 +274,27 @@ struct page *read_swap_cache_async(swp_entry_t entry,
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
-                       new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
-                                                               vma, addr);
+                       new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }
 
+               /*
+                * Swap entry may have been freed since our caller observed it.
+                */
+               if (!swap_duplicate(entry))
+                       break;
+
                /*
                 * Associate the page with swap entry in the swap cache.
-                * May fail (-ENOENT) if swap entry has been freed since
-                * our caller observed it.  May fail (-EEXIST) if there
-                * is already a page associated with this entry in the
-                * swap cache: added by a racing read_swap_cache_async,
-                * or by try_to_swap_out (or shmem_writepage) re-using
-                * the just freed swap entry for an existing page.
+                * May fail (-EEXIST) if there is already a page associated
+                * with this entry in the swap cache: added by a racing
+                * read_swap_cache_async, or add_to_swap or shmem_writepage
+                * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
-               err = add_to_swap_cache(new_page, entry);
+               SetPageLocked(new_page);
+               err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
@@ -359,9 +303,57 @@ struct page *read_swap_cache_async(swp_entry_t entry,
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
-       } while (err != -ENOENT && err != -ENOMEM);
+               ClearPageLocked(new_page);
+               swap_free(entry);
+       } while (err != -ENOMEM);
 
        if (new_page)
                page_cache_release(new_page);
        return found_page;
 }
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time.  We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+                       struct vm_area_struct *vma, unsigned long addr)
+{
+       int nr_pages;
+       struct page *page;
+       unsigned long offset;
+       unsigned long end_offset;
+
+       /*
+        * Get starting offset for readaround, and number of pages to read.
+        * Adjust starting address by readbehind (for NUMA interleave case)?
+        * No, it's very unlikely that swap layout would follow vma layout,
+        * more likely that neighbouring swap pages came from the same node:
+        * so use the same "addr" to choose the same node for each swap read.
+        */
+       nr_pages = valid_swaphandles(entry, &offset);
+       for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+               /* Ok, do the async read-ahead now */
+               page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+                                               gfp_mask, vma, addr);
+               if (!page)
+                       break;
+               page_cache_release(page);
+       }
+       lru_add_drain();        /* Push any new pages onto the LRU now */
+       return read_swap_cache_async(entry, gfp_mask, vma, addr);
+}
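
For completeness, callers now pick the allocation class themselves; a
do_swap_page-style fault path would look roughly like the sketch below
(error handling simplified, and assuming GFP_HIGHUSER_MOVABLE remains the
right class for user pages):

	page = lookup_swap_cache(entry);
	if (!page) {
		/* not cached: queue readaround, then read the target page */
		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
					vma, address);
		if (!page)
			return VM_FAULT_OOM;	/* simplified: may also mean
						 * the entry was freed */
	}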