/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated as 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER   6

#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
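
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and the default
 * NCHUNKS_ORDER of 6): CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE is 64 bytes
 * and TOTAL_CHUNKS is 64. If the header rounds up to a single chunk,
 * ZHDR_CHUNKS = 1 and NCHUNKS = 63; a larger header (e.g. with spinlock
 * debugging enabled) consumes more chunks and shrinks NCHUNKS accordingly.
 */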

#define BUDDY_MASK      (0x3)
#define BUDDY_SHIFT     2
#define SLOTS_ALIGN     (0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
        int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
        HEADLESS = 0,
        FIRST,
        MIDDLE,
        LAST,
        BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
        /*
         * we are using BUDDY_MASK in handle_to_buddy etc. so there should
         * be enough slots to hold all possible variants
         */
        unsigned long slot[BUDDY_MASK + 1];
        unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK        (0x03)
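
/*
 * Why these constants fit together (derived from the code below):
 * SLOTS_ALIGN (0x40, i.e. 64 bytes) matters because a non-headless handle
 * is the address of one of the four slot words above; since the structure
 * is allocated 64-byte aligned from a dedicated kmem cache, handle_to_slots()
 * can recover it by simply masking off the low six bits of the handle.
 * Likewise, HANDLE_FLAG_MASK relies on struct z3fold_pool being at least
 * 4-byte aligned, which leaves the two low bits of the back link free
 * for flags.
 */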

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *                      z3fold page, except for HEADLESS pages
 * @buddy:              links the z3fold page into the relevant list in the
 *                      pool
 * @page_lock:          per-page lock
 * @refcount:           reference count for the z3fold page
 * @work:               work_struct for page layout optimization
 * @slots:              pointer to the structure holding buddy slots
 * @cpu:                CPU which this page "belongs" to
 * @first_chunks:       the size of the first buddy in chunks, 0 if free
 * @middle_chunks:      the size of the middle buddy in chunks, 0 if free
 * @last_chunks:        the size of the last buddy in chunks, 0 if free
 * @start_middle:       the first chunk occupied by the middle buddy
 * @first_num:          the starting number (for the first handle)
 */
struct z3fold_header {
        struct list_head buddy;
        spinlock_t page_lock;
        struct kref refcount;
        struct work_struct work;
        struct z3fold_buddy_slots *slots;
        short cpu;
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:       pool name
 * @lock:       protects pool unbuddied/lru lists
 * @stale_lock: protects pool stale page list
 * @unbuddied:  per-cpu array of lists tracking z3fold pages that contain
 *              fewer than three buddies (i.e. have at least one free slot);
 *              the list each z3fold page is added to depends on the size of
 *              its free region.
 * @lru:        list tracking the z3fold pages in LRU order by most recently
 *              added buddy.
 * @stale:      list of pages marked for freeing
 * @pages_nr:   number of z3fold pages in the pool.
 * @c_handle:   cache for z3fold_buddy_slots allocation
 * @ops:        pointer to a structure of user defined operations specified at
 *              pool creation time.
 * @zpool:      zpool driver
 * @zpool_ops:  zpool operations structure with an evict callback
 * @compact_wq: workqueue for page layout background optimization
 * @release_wq: workqueue for safe page release
 * @work:       work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
        const char *name;
        spinlock_t lock;
        spinlock_t stale_lock;
        struct list_head *unbuddied;
        struct list_head lru;
        struct list_head stale;
        atomic64_t pages_nr;
        struct kmem_cache *c_handle;
        const struct z3fold_ops *ops;
        struct zpool *zpool;
        const struct zpool_ops *zpool_ops;
        struct workqueue_struct *compact_wq;
        struct workqueue_struct *release_wq;
        struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
        PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
        return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
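
/*
 * For example (assuming PAGE_SIZE == 4096, so CHUNK_SIZE == 64): a 100-byte
 * allocation rounds up to (100 + 63) >> 6 == 2 chunks, and a 64-byte one
 * takes exactly 1 chunk.
 */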

#define for_each_unbuddied_list(_iter, _begin) \
        for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
{
        struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
                                                        GFP_KERNEL);

        if (slots) {
                memset(slots->slot, 0, sizeof(slots->slot));
                slots->pool = (unsigned long)pool;
        }

        return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
        return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
        return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
        struct z3fold_buddy_slots *slots;
        int i;
        bool is_free;

        if (handle & (1 << PAGE_HEADLESS))
                return;

        WARN_ON(*(unsigned long *)handle == 0);
        *(unsigned long *)handle = 0;
        slots = handle_to_slots(handle);
        is_free = true;
        for (i = 0; i <= BUDDY_MASK; i++) {
                if (slots->slot[i]) {
                        is_free = false;
                        break;
                }
        }

        if (is_free) {
                struct z3fold_pool *pool = slots_to_pool(slots);

                kmem_cache_free(pool->c_handle, slots);
        }
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
                                        struct z3fold_pool *pool)
{
        struct z3fold_header *zhdr = page_address(page);
        struct z3fold_buddy_slots *slots = alloc_slots(pool);

        if (!slots)
                return NULL;

        INIT_LIST_HEAD(&page->lru);
        clear_bit(PAGE_HEADLESS, &page->private);
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
        clear_bit(PAGE_CLAIMED, &page->private);

        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
        zhdr->first_chunks = 0;
        zhdr->middle_chunks = 0;
        zhdr->last_chunks = 0;
        zhdr->first_num = 0;
        zhdr->start_middle = 0;
        zhdr->cpu = -1;
        zhdr->slots = slots;
        INIT_LIST_HEAD(&zhdr->buddy);
        INIT_WORK(&zhdr->work, compact_page_work);
        return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
        __free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
        spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
        return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
        spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
        return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
        struct z3fold_buddy_slots *slots;
        unsigned long h = (unsigned long)zhdr;
        int idx = 0;

        /*
         * For a headless page, its handle is its pointer with the extra
         * PAGE_HEADLESS bit set
         */
        if (bud == HEADLESS)
                return h | (1 << PAGE_HEADLESS);

        /* otherwise, return pointer to encoded handle */
        idx = __idx(zhdr, bud);
        h += idx;
        if (bud == LAST)
                h |= (zhdr->last_chunks << BUDDY_SHIFT);

        slots = zhdr->slots;
        slots->slot[idx] = h;
        return (unsigned long)&slots->slot[idx];
}
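
/*
 * Illustration of the encoding above (derived from the code; layout shown
 * for explanation only). The slot word for a non-headless buddy packs,
 * into one unsigned long:
 *
 *   [ page address (PAGE_MASK bits) | last_chunks << 2 (LAST only) | idx ]
 *
 * where idx = (bud + first_num) & BUDDY_MASK occupies the low two bits.
 * The handle handed out to the user is the *address* of that slot word,
 * which is what lets free_handle() clear it and handle_to_slots() find
 * the containing z3fold_buddy_slots by alignment.
 */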

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
        unsigned long addr = handle;

        if (!(addr & (1 << PAGE_HEADLESS)))
                addr = *(unsigned long *)handle;

        return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
        unsigned long addr = *(unsigned long *)handle;

        return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
        struct z3fold_header *zhdr;
        unsigned long addr;

        WARN_ON(handle & (1 << PAGE_HEADLESS));
        addr = *(unsigned long *)handle;
        zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
        return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
        return slots_to_pool(zhdr->slots);
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
        struct page *page = virt_to_page(zhdr);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);

        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        spin_lock(&pool->lock);
        if (!list_empty(&page->lru))
                list_del(&page->lru);
        spin_unlock(&pool->lock);
        if (locked)
                z3fold_page_unlock(zhdr);
        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
        spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
                        release_z3fold_page(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        __release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                                refcount);
        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
        struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
                                               refcount);
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        WARN_ON(z3fold_page_trylock(zhdr));
        __release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
        struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

        spin_lock(&pool->stale_lock);
        while (!list_empty(&pool->stale)) {
                struct z3fold_header *zhdr = list_first_entry(&pool->stale,
                                                struct z3fold_header, buddy);
                struct page *page = virt_to_page(zhdr);

                list_del(&zhdr->buddy);
                if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
                        continue;
                spin_unlock(&pool->stale_lock);
                cancel_work_sync(&zhdr->work);
                free_z3fold_page(page);
                cond_resched();
                spin_lock(&pool->stale_lock);
        }
        spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
        int nfree;
        /*
         * If there is a middle object, pick up the bigger free space
         * either before or after it. Otherwise just subtract the number
         * of chunks occupied by the first and the last objects.
         */
        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = max(nfree_before, nfree_after);
        } else
                nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        return nfree;
}
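
/*
 * Worked example (illustrative, with TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1):
 * a page holding only a middle buddy of 10 chunks at start_middle == 20 has
 * nfree_before = 20 - 1 = 19 and nfree_after = 64 - (20 + 10) = 34, so
 * num_free_chunks() reports 34 -- the largest hole a new buddy could use
 * without moving the middle object.
 */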

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
                                struct z3fold_header *zhdr)
{
        if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
                        zhdr->middle_chunks == 0) {
                struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

                int freechunks = num_free_chunks(zhdr);
                spin_lock(&pool->lock);
                list_add(&zhdr->buddy, &unbuddied[freechunks]);
                spin_unlock(&pool->lock);
                zhdr->cpu = smp_processor_id();
                put_cpu_ptr(pool->unbuddied);
        }
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
                                unsigned short dst_chunk)
{
        void *beg = zhdr;
        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP   3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
        struct page *page = virt_to_page(zhdr);

        if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
                return 0; /* can't move middle chunk, it's used */

        if (zhdr->middle_chunks == 0)
                return 0; /* nothing to compact */

        if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
                /* move to the beginning */
                mchunk_memmove(zhdr, ZHDR_CHUNKS);
                zhdr->first_chunks = zhdr->middle_chunks;
                zhdr->middle_chunks = 0;
                zhdr->start_middle = 0;
                zhdr->first_num++;
                return 1;
        }

        /*
         * moving data is expensive, so let's only do that if
         * there's substantial gain (at least BIG_CHUNK_GAP chunks)
         */
        if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
            zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
                        BIG_CHUNK_GAP) {
                mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
                return 1;
        } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
                   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
                                        + zhdr->middle_chunks) >=
                        BIG_CHUNK_GAP) {
                unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
                        zhdr->middle_chunks;
                mchunk_memmove(zhdr, new_start);
                zhdr->start_middle = new_start;
                return 1;
        }

        return 0;
}
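
/*
 * The three compaction cases above, sketched (hdr = header chunks, F/M/L =
 * first/middle/last buddy, . = free chunks); derived from the code, chunk
 * counts are examples only:
 *
 *   only middle used:  |hdr|...|MMM|....|  ->  |hdr|MMM|.......|  (M becomes F)
 *   first + middle:    |hdr|FF|...|MMM|.|  ->  |hdr|FF|MMM|....|
 *   middle + last:     |hdr|.|MMM|...|LL|  ->  |hdr|....|MMM|LL|
 *
 * The latter two moves only happen when they reclaim at least BIG_CHUNK_GAP
 * chunks, since memmove() over up to a page of data is relatively expensive.
 */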

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
        struct z3fold_pool *pool = zhdr_to_pool(zhdr);
        struct page *page;

        page = virt_to_page(zhdr);
        if (locked)
                WARN_ON(z3fold_page_trylock(zhdr));
        else
                z3fold_page_lock(zhdr);
        if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
                z3fold_page_unlock(zhdr);
                return;
        }
        spin_lock(&pool->lock);
        list_del_init(&zhdr->buddy);
        spin_unlock(&pool->lock);

        if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }

        z3fold_compact_page(zhdr);
        add_to_unbuddied(pool, zhdr);
        z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
        struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
                                                work);

        do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
                                                size_t size, bool can_sleep)
{
        struct z3fold_header *zhdr = NULL;
        struct page *page;
        struct list_head *unbuddied;
        int chunks = size_to_chunks(size), i;

lookup:
        /* First, try to find an unbuddied z3fold page. */
        unbuddied = get_cpu_ptr(pool->unbuddied);
        for_each_unbuddied_list(i, chunks) {
                struct list_head *l = &unbuddied[i];

                zhdr = list_first_entry_or_null(READ_ONCE(l),
                                        struct z3fold_header, buddy);

                if (!zhdr)
                        continue;

                /* Re-check under lock. */
                spin_lock(&pool->lock);
                l = &unbuddied[i];
                if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
                                                struct z3fold_header, buddy)) ||
                    !z3fold_page_trylock(zhdr)) {
                        spin_unlock(&pool->lock);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }
                list_del_init(&zhdr->buddy);
                zhdr->cpu = -1;
                spin_unlock(&pool->lock);

                page = virt_to_page(zhdr);
                if (test_bit(NEEDS_COMPACTING, &page->private)) {
                        z3fold_page_unlock(zhdr);
                        zhdr = NULL;
                        put_cpu_ptr(pool->unbuddied);
                        if (can_sleep)
                                cond_resched();
                        goto lookup;
                }

                /*
                 * this page could not be removed from its unbuddied
                 * list while pool lock was held, and then we've taken
                 * page lock so kref_put could not be called before
                 * we got here, so it's safe to just call kref_get()
                 */
                kref_get(&zhdr->refcount);
                break;
        }
        put_cpu_ptr(pool->unbuddied);

        if (!zhdr) {
                int cpu;

                /* look for _exact_ match on other cpus' lists */
                for_each_online_cpu(cpu) {
                        struct list_head *l;

                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
                        spin_lock(&pool->lock);
                        l = &unbuddied[chunks];

                        zhdr = list_first_entry_or_null(READ_ONCE(l),
                                                struct z3fold_header, buddy);

                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                zhdr = NULL;
                                continue;
                        }
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        spin_unlock(&pool->lock);

                        page = virt_to_page(zhdr);
                        if (test_bit(NEEDS_COMPACTING, &page->private)) {
                                z3fold_page_unlock(zhdr);
                                zhdr = NULL;
                                if (can_sleep)
                                        cond_resched();
                                continue;
                        }
                        kref_get(&zhdr->refcount);
                        break;
                }
        }

        return zhdr;
}
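
/*
 * A note on the lookup strategy above (summary, not new behaviour): the
 * fast path scans only the local CPU's unbuddied lists, starting at the
 * list whose free region exactly fits the request and moving towards
 * larger regions; only if that fails does it probe other CPUs' lists,
 * and there only for an exact fit. This keeps the common case free of
 * cross-CPU cache-line bouncing at the cost of occasionally allocating
 * a fresh page when a looser fit exists remotely.
 */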

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:       pool name
 * @gfp:        gfp flags when allocating the z3fold pool structure
 * @ops:        user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
                const struct z3fold_ops *ops)
{
        struct z3fold_pool *pool = NULL;
        int i, cpu;

        pool = kzalloc(sizeof(struct z3fold_pool), gfp);
        if (!pool)
                goto out;
        pool->c_handle = kmem_cache_create("z3fold_handle",
                                sizeof(struct z3fold_buddy_slots),
                                SLOTS_ALIGN, 0, NULL);
        if (!pool->c_handle)
                goto out_c;
        spin_lock_init(&pool->lock);
        spin_lock_init(&pool->stale_lock);
        pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS, 2);
        if (!pool->unbuddied)
                goto out_pool;
        for_each_possible_cpu(cpu) {
                struct list_head *unbuddied =
                                per_cpu_ptr(pool->unbuddied, cpu);
                for_each_unbuddied_list(i, 0)
                        INIT_LIST_HEAD(&unbuddied[i]);
        }
        INIT_LIST_HEAD(&pool->lru);
        INIT_LIST_HEAD(&pool->stale);
        atomic64_set(&pool->pages_nr, 0);
        pool->name = name;
        pool->compact_wq = create_singlethread_workqueue(pool->name);
        if (!pool->compact_wq)
                goto out_unbuddied;
        pool->release_wq = create_singlethread_workqueue(pool->name);
        if (!pool->release_wq)
                goto out_wq;
        INIT_WORK(&pool->work, free_pages_work);
        pool->ops = ops;
        return pool;

out_wq:
        destroy_workqueue(pool->compact_wq);
out_unbuddied:
        free_percpu(pool->unbuddied);
out_pool:
        kmem_cache_destroy(pool->c_handle);
out_c:
        kfree(pool);
out:
        return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:       the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
        kmem_cache_destroy(pool->c_handle);
        destroy_workqueue(pool->release_wq);
        destroy_workqueue(pool->compact_wq);
        kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:       z3fold pool from which to allocate
 * @size:       size in bytes of the desired allocation
 * @gfp:        gfp flags used if the pool needs to grow
 * @handle:     handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        int chunks = size_to_chunks(size);
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        enum buddy bud;
        bool can_sleep = gfpflags_allow_blocking(gfp);

        if (!size || (gfp & __GFP_HIGHMEM))
                return -EINVAL;

        if (size > PAGE_SIZE)
                return -ENOSPC;

        if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                bud = HEADLESS;
        else {
retry:
                zhdr = __z3fold_alloc(pool, size, can_sleep);
                if (zhdr) {
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
                                        bud = LAST;
                                else
                                        bud = FIRST;
                        } else if (zhdr->last_chunks == 0)
                                bud = LAST;
                        else if (zhdr->middle_chunks == 0)
                                bud = MIDDLE;
                        else {
                                if (kref_put(&zhdr->refcount,
                                             release_z3fold_page_locked))
                                        atomic64_dec(&pool->pages_nr);
                                else
                                        z3fold_page_unlock(zhdr);
                                pr_err("No free chunks in unbuddied\n");
                                WARN_ON(1);
                                goto retry;
                        }
                        page = virt_to_page(zhdr);
                        goto found;
                }
                bud = FIRST;
        }

        page = NULL;
        if (can_sleep) {
                spin_lock(&pool->stale_lock);
                zhdr = list_first_entry_or_null(&pool->stale,
                                                struct z3fold_header, buddy);
                /*
                 * Before allocating a page, let's see if we can take one from
                 * the stale pages list. cancel_work_sync() can sleep so we
                 * limit this case to the contexts where we can sleep
                 */
                if (zhdr) {
                        list_del(&zhdr->buddy);
                        spin_unlock(&pool->stale_lock);
                        cancel_work_sync(&zhdr->work);
                        page = virt_to_page(zhdr);
                } else {
                        spin_unlock(&pool->stale_lock);
                }
        }
        if (!page)
                page = alloc_page(gfp);

        if (!page)
                return -ENOMEM;

        zhdr = init_z3fold_page(page, pool);
        if (!zhdr) {
                __free_page(page);
                return -ENOMEM;
        }
        atomic64_inc(&pool->pages_nr);

        if (bud == HEADLESS) {
                set_bit(PAGE_HEADLESS, &page->private);
                goto headless;
        }
        z3fold_page_lock(zhdr);

found:
        if (bud == FIRST)
                zhdr->first_chunks = chunks;
        else if (bud == LAST)
                zhdr->last_chunks = chunks;
        else {
                zhdr->middle_chunks = chunks;
                zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
        }
        add_to_unbuddied(pool, zhdr);

headless:
        spin_lock(&pool->lock);
        /* Add/move z3fold page to beginning of LRU */
        if (!list_empty(&page->lru))
                list_del(&page->lru);

        list_add(&page->lru, &pool->lru);

        *handle = encode_handle(zhdr, bud);
        spin_unlock(&pool->lock);
        if (bud != HEADLESS)
                z3fold_page_unlock(zhdr);

        return 0;
}
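
/*
 * Minimal usage sketch for the core entry points (hypothetical caller code,
 * shown for illustration only -- in-tree users go through the zpool layer
 * at the bottom of this file rather than calling these static functions
 * directly; "compressed_data" and "len" are placeholder names):
 *
 *      unsigned long handle;
 *      void *dst;
 *
 *      if (z3fold_alloc(pool, len, GFP_KERNEL, &handle) == 0) {
 *              dst = z3fold_map(pool, handle);
 *              memcpy(dst, compressed_data, len);
 *              z3fold_unmap(pool, handle);
 *              ...
 *              z3fold_free(pool, handle);
 *      }
 */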

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:       pool in which the allocation resided
 * @handle:     handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* If a headless page is under reclaim, just leave.
                 * NB: we use test_and_set_bit for a reason: if the bit
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
                if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
                        free_z3fold_page(page);
                        atomic64_dec(&pool->pages_nr);
                }
                return;
        }

        /* Non-headless case */
        z3fold_page_lock(zhdr);
        bud = handle_to_buddy(handle);

        switch (bud) {
        case FIRST:
                zhdr->first_chunks = 0;
                break;
        case MIDDLE:
                zhdr->middle_chunks = 0;
                break;
        case LAST:
                zhdr->last_chunks = 0;
                break;
        default:
                pr_err("%s: unknown bud %d\n", __func__, bud);
                WARN_ON(1);
                z3fold_page_unlock(zhdr);
                return;
        }

        free_handle(handle);
        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
        }
        if (test_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
                spin_lock(&pool->lock);
                list_del_init(&zhdr->buddy);
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:       pool from which a page will attempt to be evicted
 * @retries:    number of pages on the LRU list for which eviction will
 *              be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
        int i, ret = 0;
        struct z3fold_header *zhdr = NULL;
        struct page *page = NULL;
        struct list_head *pos;
        unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

        spin_lock(&pool->lock);
        if (!pool->ops || !pool->ops->evict || retries == 0) {
                spin_unlock(&pool->lock);
                return -EINVAL;
        }
        for (i = 0; i < retries; i++) {
                if (list_empty(&pool->lru)) {
                        spin_unlock(&pool->lock);
                        return -EINVAL;
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);

                        /* this bit could have been set by free, in which case
                         * we pass over to the next page in the pool.
                         */
                        if (test_and_set_bit(PAGE_CLAIMED, &page->private))
                                continue;

                        zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
                                break;

                        if (!z3fold_page_trylock(zhdr)) {
                                zhdr = NULL;
                                continue; /* can't evict at this point */
                        }
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
                        break;
                }

                if (!zhdr)
                        break;

                list_del_init(&page->lru);
                spin_unlock(&pool->lock);

                if (!test_bit(PAGE_HEADLESS, &page->private)) {
                        /*
                         * We need to encode the handles before unlocking,
                         * since we can race with free that will set
                         * (first|last)_chunks to 0
                         */
                        first_handle = 0;
                        last_handle = 0;
                        middle_handle = 0;
                        if (zhdr->first_chunks)
                                first_handle = encode_handle(zhdr, FIRST);
                        if (zhdr->middle_chunks)
                                middle_handle = encode_handle(zhdr, MIDDLE);
                        if (zhdr->last_chunks)
                                last_handle = encode_handle(zhdr, LAST);
                        /*
                         * it's safe to unlock here because we hold a
                         * reference to this page
                         */
                        z3fold_page_unlock(zhdr);
                } else {
                        first_handle = encode_handle(zhdr, HEADLESS);
                        last_handle = middle_handle = 0;
                }

                /* Issue the eviction callback(s) */
                if (middle_handle) {
                        ret = pool->ops->evict(pool, middle_handle);
                        if (ret)
                                goto next;
                }
                if (first_handle) {
                        ret = pool->ops->evict(pool, first_handle);
                        if (ret)
                                goto next;
                }
                if (last_handle) {
                        ret = pool->ops->evict(pool, last_handle);
                        if (ret)
                                goto next;
                }
next:
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                free_z3fold_page(page);
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        spin_lock(&pool->lock);
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
                } else {
                        z3fold_page_lock(zhdr);
                        clear_bit(PAGE_CLAIMED, &page->private);
                        if (kref_put(&zhdr->refcount,
                                        release_z3fold_page_locked)) {
                                atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        /*
                         * if we are here, the page is still not completely
                         * free. Take the global pool lock then to be able
                         * to add it back to the lru list
                         */
                        spin_lock(&pool->lock);
                        list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
                        z3fold_page_unlock(zhdr);
                }

                /* We started off locked, so we need to lock the pool back */
                spin_lock(&pool->lock);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        void *addr;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        addr = zhdr;
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                goto out;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        switch (buddy) {
        case FIRST:
                addr += ZHDR_SIZE_ALIGNED;
                break;
        case MIDDLE:
                addr += zhdr->start_middle << CHUNK_SHIFT;
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
                addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
                WARN_ON(1);
                addr = NULL;
                break;
        }

        z3fold_page_unlock(zhdr);
out:
        return addr;
}
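
/*
 * Address math recap (derived from the code above): FIRST always starts
 * right after the aligned header, MIDDLE starts at the start_middle chunk
 * recorded in the header, and LAST is laid out flush against the end of
 * the page, which is why its offset is computed backwards from PAGE_SIZE
 * using the chunk count packed into the handle by encode_handle().
 */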

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:       pool in which the allocation resides
 * @handle:     handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy buddy;

        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);

        if (test_bit(PAGE_HEADLESS, &page->private))
                return;

        z3fold_page_lock(zhdr);
        buddy = handle_to_buddy(handle);
        if (buddy == MIDDLE)
                clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:       pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
        return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
        if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
                return pool->zpool_ops->evict(pool->zpool, handle);
        else
                return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
        .evict =        z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
                               const struct zpool_ops *zpool_ops,
                               struct zpool *zpool)
{
        struct z3fold_pool *pool;

        pool = z3fold_create_pool(name, gfp,
                                zpool_ops ? &z3fold_zpool_ops : NULL);
        if (pool) {
                pool->zpool = zpool;
                pool->zpool_ops = zpool_ops;
        }
        return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
        z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
                        unsigned long *handle)
{
        return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
        z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
                        unsigned int *reclaimed)
{
        unsigned int total = 0;
        int ret = -EINVAL;

        while (total < pages) {
                ret = z3fold_reclaim_page(pool, 8);
                if (ret < 0)
                        break;
                total++;
        }

        if (reclaimed)
                *reclaimed = total;

        return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
                        enum zpool_mapmode mm)
{
        return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
        z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
        return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
        .type =         "z3fold",
        .owner =        THIS_MODULE,
        .create =       z3fold_zpool_create,
        .destroy =      z3fold_zpool_destroy,
        .malloc =       z3fold_zpool_malloc,
        .free =         z3fold_zpool_free,
        .shrink =       z3fold_zpool_shrink,
        .map =          z3fold_zpool_map,
        .unmap =        z3fold_zpool_unmap,
        .total_size =   z3fold_zpool_total_size,
};
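
/*
 * With this driver registered, z3fold is selected through the generic zpool
 * API rather than through any symbols of its own. For instance (illustrative
 * only, assuming zswap is enabled on the system), a user can pick it at
 * runtime with:
 *
 *      echo z3fold > /sys/module/zswap/parameters/zpool
 *
 * after which zswap's pool creation requests for the "z3fold" type land in
 * z3fold_zpool_create() above.
 */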

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
        /* Make sure the z3fold header is not larger than the page size */
        BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
        zpool_register_driver(&z3fold_zpool_driver);

        return 0;
}

static void __exit exit_z3fold(void)
{
        zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");