/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX
};
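
/*
 * Illustrative layout of a fully populated (non-HEADLESS) z3fold page,
 * derived from the accessors below rather than from any separate spec:
 *
 *      | z3fold_header | FIRST ... | ... MIDDLE ... | ...free... | LAST |
 *
 * FIRST grows upwards from just after the header, LAST is packed against
 * the end of the page, and MIDDLE floats in between at start_middle and
 * may be slid around by the compaction code further down.
 */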

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *                      each z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @pool:               pointer to the pool which this page belongs to
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       the starting chunk of the middle buddy
 * @first_num:          the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_pool *pool;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK      (0x3)
#define BUDDY_SHIFT     2

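/*
 * Worked example of the chunk arithmetic above, assuming the common
 * PAGE_SHIFT == 12 (4 KiB pages); other page sizes scale accordingly:
 *   CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *   TOTAL_CHUNKS = 4096 >> 6 = 64
 * With a header rounded up to one chunk, NCHUNKS = 63, and
 * size_to_chunks(100) below rounds a 100-byte request up to 2 chunks.
 */
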
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain at
 *              most two buddies; the list each z3fold page is added to
 *              depends on the size of its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @ops:        pointer to a structure of user defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driver
 * @zpool_ops:  zpool operations structure with an evict callback
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
        PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
                                        struct z3fold_pool *pool)
{
        struct z3fold_header *zhdr = page_address(page);

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->pool = pool;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
        __free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        unsigned long handle;

        handle = (unsigned long)zhdr;
        if (bud != HEADLESS) {
                handle |= (bud + zhdr->first_num) & BUDDY_MASK;
                if (bud == LAST)
                        handle |= (zhdr->last_chunks << BUDDY_SHIFT);
        }
        return handle;
}
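
/*
 * Resulting handle layout, as implied by encode_handle() above (this is
 * an internal convention of this file, not an exported ABI): the upper
 * bits carry the page-aligned z3fold_header address, bits [1:0] carry
 * (bud + first_num) & BUDDY_MASK, and for LAST buddies the bits above
 * BUDDY_SHIFT additionally cache last_chunks so that the object size
 * can be recovered later by handle_to_chunks() below.
 */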

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        return (struct z3fold_header *)(handle & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
        return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
        return (handle - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
        return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del(&page->lru);
        spin_unlock(&pool->lock);
        if (locked)
                z3fold_page_unlock(zhdr);
        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                               refcount);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
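
/*
 * Example, assuming 4 KiB pages (TOTAL_CHUNKS == 64) and a one-chunk
 * header (ZHDR_CHUNKS == 1): with first_chunks == 0, start_middle == 20,
 * middle_chunks == 10 and last_chunks == 4, nfree_before is 20 - 1 = 19
 * and nfree_after is forced to 0 because a last buddy exists, so
 * num_free_chunks() reports 19 even though the 30 chunks between the
 * middle and last buddies are also unused: a new buddy in this page
 * could only be FIRST, which must start right after the header, so only
 * the run usable by it counts.
 */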

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
                                struct z3fold_header *zhdr)
{
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                int freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
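
/*
 * Illustration of the first BIG_CHUNK_GAP case above (the numbers are
 * made up for the example): with first_chunks == 5, ZHDR_CHUNKS == 1
 * and start_middle == 12, the gap is 12 - (5 + 1) = 6 >= BIG_CHUNK_GAP,
 * so the middle object is slid down to chunk 6 and the contiguous free
 * tail after it grows by 6 chunks.
 */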

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        struct page *page;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        z3fold_compact_page(zhdr);
        add_to_unbuddied(pool, zhdr);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                                                size_t size, bool can_sleep)
{
        struct z3fold_header *zhdr = NULL;
        struct page *page;
        struct list_head *unbuddied;
        int chunks = size_to_chunks(size), i;

lookup:
        /* First, try to find an unbuddied z3fold page. */
        unbuddied = get_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

                zhdr = list_first_entry_or_null(READ_ONCE(l),
                                        struct z3fold_header, buddy);

                if (!zhdr)
                        continue;

                /* Re-check under lock. */
                spin_lock(&pool->lock);
                l = &unbuddied[i];
                if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                                struct z3fold_header, buddy)) ||
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }
                list_del_init(&zhdr->buddy);
                zhdr->cpu = -1;
                spin_unlock(&pool->lock);

                page = virt_to_page(zhdr);
                if (test_bit(NEEDS_COMPACTING, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }

                /*
                 * this page could not be removed from its unbuddied
                 * list while pool lock was held, and then we've taken
                 * page lock so kref_put could not be called before
                 * we got here, so it's safe to just call kref_get()
                 */
                kref_get(&zhdr->refcount);
                break;
        }
        put_cpu_ptr(pool->unbuddied);

        if (!zhdr) {
                int cpu;

                /* look for _exact_ match on other cpus' lists */
                for_each_online_cpu(cpu) {
                        struct list_head *l;

                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
                        spin_lock(&pool->lock);
                        l = &unbuddied[chunks];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                zhdr = NULL;
                                continue;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                if (can_sleep)
                                        cond_resched();
                                continue;
                        }
                        kref_get(&zhdr->refcount);
                        break;
                }
        }

        return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        destroy_workqueue(pool->release_wq);
        destroy_workqueue(pool->compact_wq);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = size_to_chunks(size);
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = gfpflags_allow_blocking(gfp);

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
retry:
                zhdr = __z3fold_alloc(pool, size, can_sleep);
                if (zhdr) {
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
                                        bud = LAST;
                                else
                                        bud = FIRST;
                        } else if (zhdr->last_chunks == 0)
                                bud = LAST;
                        else if (zhdr->middle_chunks == 0)
                                bud = MIDDLE;
                        else {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto retry;
                        }
                        page = virt_to_page(zhdr);
                        goto found;
                }
                bud = FIRST;
        }

        page = NULL;
        if (can_sleep) {
                spin_lock(&pool->stale_lock);
                zhdr = list_first_entry_or_null(&pool->stale,
                                                struct z3fold_header, buddy);
                /*
                 * Before allocating a page, let's see if we can take one from
                 * the stale pages list. cancel_work_sync() can sleep so we
                 * limit this case to the contexts where we can sleep
                 */
                if (zhdr) {
                        list_del(&zhdr->buddy);
                        spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
                        page = virt_to_page(zhdr);
                } else {
                        spin_unlock(&pool->stale_lock);
                }
        }
        if (!page)
                page = alloc_page(gfp);

        if (!page)
                return -ENOMEM;

        zhdr = init_z3fold_page(page, pool);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
        }
        atomic64_inc(&pool->pages_nr);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }
        add_to_unbuddied(pool, zhdr);

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* if a headless page is under reclaim, just leave.
                 * NB: we use test_and_set_bit for a reason: if the bit
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
                if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
                        free_z3fold_page(page);
                        atomic64_dec(&pool->pages_nr);
                }
                return;
        }

        /* Non-headless case */
        z3fold_page_lock(zhdr);
        bud = handle_to_buddy(handle);

        switch (bud) {
        case FIRST:
                zhdr->first_chunks = 0;
                break;
        case MIDDLE:
                zhdr->middle_chunks = 0;
                break;
        case LAST:
                zhdr->last_chunks = 0;
                break;
        default:
                pr_err("%s: unknown bud %d\n", __func__, bud);
                WARN_ON(1);
                z3fold_page_unlock(zhdr);
                return;
        }

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }
        if (test_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                spin_lock(&pool->lock);
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lock);
                        return -EINVAL;
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);

                        /* this bit could have been set by free, in which case
                         * we pass over to the next page in the pool.
                         */
                        if (test_and_set_bit(PAGE_CLAIMED, &page->private))
                                continue;

                        zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                break;

                        if (!z3fold_page_trylock(zhdr)) {
                                zhdr = NULL;
                                continue; /* can't evict at this point */
                        }
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        break;
                }

                if (!zhdr)
                        break;

                list_del_init(&page->lru);
                spin_unlock(&pool->lock);

                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free which will set
                         * (first|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                        /*
                         * it's safe to unlock here because we hold a
                         * reference to this page
                         */
                        z3fold_page_unlock(zhdr);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                free_z3fold_page(page);
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        spin_lock(&pool->lock);
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
                } else {
                        z3fold_page_lock(zhdr);
                        clear_bit(PAGE_CLAIMED, &page->private);
                        if (kref_put(&zhdr->refcount,
                                        release_z3fold_page_locked)) {
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        /*
                         * if we are here, the page is still not completely
                         * free. Take the global pool lock then to be able
                         * to add it back to the lru list
                         */
                        spin_lock(&pool->lock);
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
                        z3fold_page_unlock(zhdr);
                }

                /* We started off locked so we need to lock the pool back */
                spin_lock(&pool->lock);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }

        z3fold_page_unlock(zhdr);
out:
        return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 *****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict =        z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                               const struct zpool_ops *zpool_ops,
                               struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(name, gfp,
                                zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                        unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .shrink =       z3fold_zpool_shrink,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};
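
/*
 * Hypothetical consumer-side sketch (not part of this file): z3fold is
 * only reachable through the zpool facade registered above. Exact zpool
 * signatures vary between kernel versions, so treat this as an
 * illustration rather than a reference:
 *
 *      struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *                                           GFP_KERNEL, &my_zpool_ops);
 *      unsigned long handle;
 *      if (zp && !zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *              void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *              memcpy(dst, src, len);
 *              zpool_unmap_handle(zp, handle);
 *      }
 *
 * where "mypool", my_zpool_ops, len, src and dst are placeholders.
 */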

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header is not larger than the page size */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");