diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8837b22c848d069262653318a69a542b6ddf34dd..d954b71c4f9c2e842e142713e1a921addb6a4c9d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER       HPAGE_PMD_NR
+
+#define swap_entry_size(size)  (size)
 #else
 #define SWAPFILE_CLUSTER       256
+
+/*
+ * Define swap_entry_size() as a constant to let the compiler optimize
+ * out some code if !CONFIG_THP_SWAP
+ */
+#define swap_entry_size(size)  1
 #endif
 #define LATENCY_LIMIT          256
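
With swap_entry_size() fixed at 1 when !CONFIG_THP_SWAP, every size == SWAPFILE_CLUSTER
test below folds to a compile-time false, so the huge-entry branches are still
type-checked but then dropped by the optimizer. A minimal sketch of the effect; the
two helper names are hypothetical, not from this file:

	/* Sketch only: with swap_entry_size(n) == 1, the comparison is the
	 * constant expression 1 == 256, so the huge path compiles away. */
	static int example_alloc(int nr_pages)
	{
		unsigned long size = swap_entry_size(nr_pages);

		if (size == SWAPFILE_CLUSTER)
			return alloc_huge_entry();	/* dead code when !CONFIG_THP_SWAP */
		return alloc_single_entry();		/* always emitted */
	}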
 
@@ -269,7 +277,9 @@ static inline void cluster_set_null(struct swap_cluster_info *info)
 
 static inline bool cluster_is_huge(struct swap_cluster_info *info)
 {
-       return info->flags & CLUSTER_FLAG_HUGE;
+       if (IS_ENABLED(CONFIG_THP_SWAP))
+               return info->flags & CLUSTER_FLAG_HUGE;
+       return false;
 }
 
 static inline void cluster_clear_huge(struct swap_cluster_info *info)
@@ -296,13 +306,18 @@ static inline void unlock_cluster(struct swap_cluster_info *ci)
                spin_unlock(&ci->lock);
 }
 
+/*
+ * Determine the locking method in use for this device.  Return
+ * swap_cluster_info if SSD-style cluster-based locking is in place.
+ */
 static inline struct swap_cluster_info *lock_cluster_or_swap_info(
-       struct swap_info_struct *si,
-       unsigned long offset)
+               struct swap_info_struct *si, unsigned long offset)
 {
        struct swap_cluster_info *ci;
 
+       /* Try to use fine-grained SSD-style locking if available: */
        ci = lock_cluster(si, offset);
+       /* Otherwise, fall back to traditional, coarse locking: */
        if (!ci)
                spin_lock(&si->lock);
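
The helper is meant to be paired with unlock_cluster_or_swap_info(), which releases
whichever lock was taken. A hedged sketch of the calling pattern, not part of this hunk:

	struct swap_cluster_info *ci;

	ci = lock_cluster_or_swap_info(si, offset);	/* ci != NULL: per-cluster lock (SSD) */
							/* ci == NULL: si->lock was taken      */
	/* ... read or update si->swap_map[offset] ... */
	unlock_cluster_or_swap_info(si, ci);		/* drops whichever lock was taken */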
 
@@ -863,7 +878,6 @@ no_page:
        return n_ret;
 }
 
-#ifdef CONFIG_THP_SWAP
 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
 {
        unsigned long idx;
@@ -871,6 +885,15 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
        unsigned long offset, i;
        unsigned char *map;
 
+       /*
+        * Should not even be attempting cluster allocations when huge
+        * page swap is disabled.  Warn and fail the allocation.
+        */
+       if (!IS_ENABLED(CONFIG_THP_SWAP)) {
+               VM_WARN_ON_ONCE(1);
+               return 0;
+       }
+
        if (cluster_list_empty(&si->free_clusters))
                return 0;
 
@@ -901,13 +924,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
        unlock_cluster(ci);
        swap_range_free(si, offset, SWAPFILE_CLUSTER);
 }
-#else
-static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
-{
-       VM_WARN_ON_ONCE(1);
-       return 0;
-}
-#endif /* CONFIG_THP_SWAP */
 
 static unsigned long scan_swap_map(struct swap_info_struct *si,
                                   unsigned char usage)
@@ -924,18 +940,18 @@ static unsigned long scan_swap_map(struct swap_info_struct *si,
 
 }
 
-int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
+int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
-       unsigned long nr_pages = cluster ? SWAPFILE_CLUSTER : 1;
+       unsigned long size = swap_entry_size(entry_size);
        struct swap_info_struct *si, *next;
        long avail_pgs;
        int n_ret = 0;
        int node;
 
        /* Only single cluster request supported */
-       WARN_ON_ONCE(n_goal > 1 && cluster);
+       WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
 
-       avail_pgs = atomic_long_read(&nr_swap_pages) / nr_pages;
+       avail_pgs = atomic_long_read(&nr_swap_pages) / size;
        if (avail_pgs <= 0)
                goto noswap;
 
@@ -945,7 +961,7 @@ int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[])
        if (n_goal > avail_pgs)
                n_goal = avail_pgs;
 
-       atomic_long_sub(n_goal * nr_pages, &nr_swap_pages);
+       atomic_long_sub(n_goal * size, &nr_swap_pages);
 
        spin_lock(&swap_avail_lock);
 
@@ -972,14 +988,14 @@ start_over:
                        spin_unlock(&si->lock);
                        goto nextsi;
                }
-               if (cluster) {
+               if (size == SWAPFILE_CLUSTER) {
                        if (!(si->flags & SWP_FILE))
                                n_ret = swap_alloc_cluster(si, swp_entries);
                } else
                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
                                                    n_goal, swp_entries);
                spin_unlock(&si->lock);
-               if (n_ret || cluster)
+               if (n_ret || size == SWAPFILE_CLUSTER)
                        goto check_out;
                pr_debug("scan_swap_map of si %d failed to find offset\n",
                        si->type);
@@ -1005,7 +1021,7 @@ nextsi:
 
 check_out:
        if (n_ret < n_goal)
-               atomic_long_add((long)(n_goal - n_ret) * nr_pages,
+               atomic_long_add((long)(n_goal - n_ret) * size,
                                &nr_swap_pages);
 noswap:
        return n_ret;
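
The new entry_size argument replaces the old bool cluster: callers pass the number of
pages one swap entry must cover, and the THP case is recognized by comparing it with
SWAPFILE_CLUSTER. A simplified sketch of a caller, loosely modelled on get_swap_page()
in mm/swap_slots.c (slot caching and error handling omitted):

	swp_entry_t entry = (swp_entry_t){ 0 };

	if (PageTransHuge(page))
		/* one entry backing a whole PMD-sized THP */
		get_swap_pages(1, &entry, HPAGE_PMD_NR);
	else
		/* one ordinary single-page entry */
		get_swap_pages(1, &entry, 1);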
@@ -1107,16 +1123,13 @@ static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
        return p;
 }
 
-static unsigned char __swap_entry_free(struct swap_info_struct *p,
-                                      swp_entry_t entry, unsigned char usage)
+static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
+                                             unsigned long offset,
+                                             unsigned char usage)
 {
-       struct swap_cluster_info *ci;
-       unsigned long offset = swp_offset(entry);
        unsigned char count;
        unsigned char has_cache;
 
-       ci = lock_cluster_or_swap_info(p, offset);
-
        count = p->swap_map[offset];
 
        has_cache = count & SWAP_HAS_CACHE;
@@ -1144,6 +1157,17 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p,
        usage = count | has_cache;
        p->swap_map[offset] = usage ? : SWAP_HAS_CACHE;
 
+       return usage;
+}
+
+static unsigned char __swap_entry_free(struct swap_info_struct *p,
+                                      swp_entry_t entry, unsigned char usage)
+{
+       struct swap_cluster_info *ci;
+       unsigned long offset = swp_offset(entry);
+
+       ci = lock_cluster_or_swap_info(p, offset);
+       usage = __swap_entry_free_locked(p, offset, usage);
        unlock_cluster_or_swap_info(p, ci);
 
        return usage;
@@ -1184,19 +1208,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-static void swapcache_free(swp_entry_t entry)
-{
-       struct swap_info_struct *p;
-
-       p = _swap_info_get(entry);
-       if (p) {
-               if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
-                       free_swap_slot(entry);
-       }
-}
-
-#ifdef CONFIG_THP_SWAP
-static void swapcache_free_cluster(swp_entry_t entry)
+void put_swap_page(struct page *page, swp_entry_t entry)
 {
        unsigned long offset = swp_offset(entry);
        unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1205,42 +1217,48 @@ static void swapcache_free_cluster(swp_entry_t entry)
        unsigned char *map;
        unsigned int i, free_entries = 0;
        unsigned char val;
+       int size = swap_entry_size(hpage_nr_pages(page));
 
        si = _swap_info_get(entry);
        if (!si)
                return;
 
-       ci = lock_cluster(si, offset);
-       VM_BUG_ON(!cluster_is_huge(ci));
-       map = si->swap_map + offset;
-       for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-               val = map[i];
-               VM_BUG_ON(!(val & SWAP_HAS_CACHE));
-               if (val == SWAP_HAS_CACHE)
-                       free_entries++;
-       }
-       if (!free_entries) {
-               for (i = 0; i < SWAPFILE_CLUSTER; i++)
-                       map[i] &= ~SWAP_HAS_CACHE;
+       ci = lock_cluster_or_swap_info(si, offset);
+       if (size == SWAPFILE_CLUSTER) {
+               VM_BUG_ON(!cluster_is_huge(ci));
+               map = si->swap_map + offset;
+               for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+                       val = map[i];
+                       VM_BUG_ON(!(val & SWAP_HAS_CACHE));
+                       if (val == SWAP_HAS_CACHE)
+                               free_entries++;
+               }
+               cluster_clear_huge(ci);
+               if (free_entries == SWAPFILE_CLUSTER) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       spin_lock(&si->lock);
+                       ci = lock_cluster(si, offset);
+                       memset(map, 0, SWAPFILE_CLUSTER);
+                       unlock_cluster(ci);
+                       mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+                       swap_free_cluster(si, idx);
+                       spin_unlock(&si->lock);
+                       return;
+               }
        }
-       cluster_clear_huge(ci);
-       unlock_cluster(ci);
-       if (free_entries == SWAPFILE_CLUSTER) {
-               spin_lock(&si->lock);
-               ci = lock_cluster(si, offset);
-               memset(map, 0, SWAPFILE_CLUSTER);
-               unlock_cluster(ci);
-               mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
-               swap_free_cluster(si, idx);
-               spin_unlock(&si->lock);
-       } else if (free_entries) {
-               for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
-                       if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
-                               free_swap_slot(entry);
+       for (i = 0; i < size; i++, entry.val++) {
+               if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
+                       unlock_cluster_or_swap_info(si, ci);
+                       free_swap_slot(entry);
+                       if (i == size - 1)
+                               return;
+                       lock_cluster_or_swap_info(si, offset);
                }
        }
+       unlock_cluster_or_swap_info(si, ci);
 }
 
+#ifdef CONFIG_THP_SWAP
 int split_swap_cluster(swp_entry_t entry)
 {
        struct swap_info_struct *si;
@@ -1255,19 +1273,7 @@ int split_swap_cluster(swp_entry_t entry)
        unlock_cluster(ci);
        return 0;
 }
-#else
-static inline void swapcache_free_cluster(swp_entry_t entry)
-{
-}
-#endif /* CONFIG_THP_SWAP */
-
-void put_swap_page(struct page *page, swp_entry_t entry)
-{
-       if (!PageTransHuge(page))
-               swapcache_free(entry);
-       else
-               swapcache_free_cluster(entry);
-}
+#endif
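
split_swap_cluster() stays under CONFIG_THP_SWAP because its only caller is on the THP
split path; roughly, __split_huge_page() in mm/huge_memory.c clears the huge marking of
the backing cluster when a THP sitting in the swap cache is split (sketch, simplified):

	if (PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		split_swap_cluster(entry);	/* drop CLUSTER_FLAG_HUGE */
	}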
 
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
@@ -1409,7 +1415,6 @@ out:
        return count;
 }
 
-#ifdef CONFIG_THP_SWAP
 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
                                         swp_entry_t entry)
 {
@@ -1422,12 +1427,12 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
 
        ci = lock_cluster_or_swap_info(si, offset);
        if (!ci || !cluster_is_huge(ci)) {
-               if (map[roffset] != SWAP_HAS_CACHE)
+               if (swap_count(map[roffset]))
                        ret = true;
                goto unlock_out;
        }
        for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-               if (map[offset + i] != SWAP_HAS_CACHE) {
+               if (swap_count(map[offset + i])) {
                        ret = true;
                        break;
                }
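
Switching from the != SWAP_HAS_CACHE comparison to swap_count() makes the test ask
specifically whether any PTE still references the entry, ignoring the swap-cache bit.
For reference, swap_count() is defined earlier in this file, roughly as:

	static inline unsigned char swap_count(unsigned char ent)
	{
		return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED */
	}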
@@ -1442,7 +1447,7 @@ static bool page_swapped(struct page *page)
        swp_entry_t entry;
        struct swap_info_struct *si;
 
-       if (likely(!PageTransCompound(page)))
+       if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
                return page_swapcount(page) != 0;
 
        page = compound_head(page);
@@ -1466,10 +1471,8 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
        /* hugetlbfs shouldn't call it */
        VM_BUG_ON_PAGE(PageHuge(page), page);
 
-       if (likely(!PageTransCompound(page))) {
-               mapcount = atomic_read(&page->_mapcount) + 1;
-               if (total_mapcount)
-                       *total_mapcount = mapcount;
+       if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
+               mapcount = page_trans_huge_mapcount(page, total_mapcount);
                if (PageSwapCache(page))
                        swapcount = page_swapcount(page);
                if (total_swapcount)
@@ -1516,26 +1519,6 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
 
        return map_swapcount;
 }
-#else
-#define swap_page_trans_huge_swapped(si, entry)        swap_swapcount(si, entry)
-#define page_swapped(page)                     (page_swapcount(page) != 0)
-
-static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
-                                        int *total_swapcount)
-{
-       int mapcount, swapcount = 0;
-
-       /* hugetlbfs shouldn't call it */
-       VM_BUG_ON_PAGE(PageHuge(page), page);
-
-       mapcount = page_trans_huge_mapcount(page, total_mapcount);
-       if (PageSwapCache(page))
-               swapcount = page_swapcount(page);
-       if (total_swapcount)
-               *total_swapcount = swapcount;
-       return mapcount + swapcount;
-}
-#endif
 
 /*
  * We can write to an anon page without COW if there are no other references