 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
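
/*
 * Example (an illustrative sketch, not part of this file): a user such as
 * zswap reaches z3fold through the generic zpool layer, roughly like this,
 * assuming the standard zpool API; "my_zpool_ops" is a hypothetical
 * eviction-callback table supplied by the caller:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && zpool_malloc(zp, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 */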
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:	links the z3fold page into the relevant list in the
 *		pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:	work_struct for page layout optimization
 * @pool:	pointer to the pool which this page belongs to
 * @cpu:	CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:	the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; correspondingly,
 * there are 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
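
/*
 * Worked example of the chunk arithmetic above (illustrative only, assuming
 * PAGE_SIZE == 4096 and a z3fold_header that fits into one chunk):
 *
 *	CHUNK_SHIFT       = 12 - 6 = 6
 *	CHUNK_SIZE        = 1 << 6 = 64 bytes
 *	TOTAL_CHUNKS      = 4096 >> 6 = 64
 *	ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64) = 64
 *	ZHDR_CHUNKS       = 64 >> 6 = 1
 *	NCHUNKS           = (4096 - 64) >> 6 = 63
 */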
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
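
/*
 * Illustrative example (assuming CHUNK_SIZE == 64): a 1000-byte request
 * needs size_to_chunks(1000) == (1000 + 63) >> 6 == 16 chunks, and
 * for_each_unbuddied_list(i, 16) then walks the freelists for pages with
 * 16, 17, ..., NCHUNKS - 1 free chunks until a fit is found.
 */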
static void compact_page_work(struct work_struct *w);
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS) {
		handle |= (bud + zhdr->first_num) & BUDDY_MASK;
		if (bud == LAST)
			handle |= (zhdr->last_chunks << BUDDY_SHIFT);
	}
	return handle;
}
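
/*
 * Handle layout sketch (illustrative, assuming 4K pages): the bits covered
 * by PAGE_MASK hold the z3fold_header address, bits 0-1 hold the buddy
 * number and bits 2..PAGE_SHIFT-1 hold last_chunks for LAST buddies.
 * E.g. with first_num == 2, encoding LAST (== 3) with last_chunks == 5
 * gives low bits ((3 + 2) & 0x3) | (5 << 2) == 0x15; handle_to_buddy()
 * below recovers (0x15 - 2) & 0x3 == 3 == LAST and handle_to_chunks()
 * recovers 0x15 >> 2 == 5.
 */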
/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}
static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
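
/*
 * Worked example (illustrative, assuming ZHDR_CHUNKS == 1 and
 * TOTAL_CHUNKS == 64): a page with first_chunks == 0, start_middle == 20,
 * middle_chunks == 10 and last_chunks == 4 has
 * nfree_before == 20 - 1 == 19 and nfree_after == 0, so num_free_chunks()
 * returns 19 and the page goes on unbuddied list 19.
 */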
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				   unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
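
/*
 * Illustrative compaction example (assuming ZHDR_CHUNKS == 1): with
 * first_chunks == 10, start_middle == 20 and last_chunks == 0, the gap is
 * 20 - (10 + 1) == 9 >= BIG_CHUNK_GAP, so the middle buddy is moved down
 * to start at chunk 11, merging the two free regions into one.
 */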
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}
static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);
		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);
			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the request cannot fit in a single
 * page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
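
/*
 * Illustrative threshold arithmetic for the HEADLESS path above (assuming
 * PAGE_SIZE == 4096, ZHDR_SIZE_ALIGNED == 64 and CHUNK_SIZE == 64): any
 * request larger than 4096 - 64 - 64 == 3968 bytes cannot usefully share
 * its page, so it is stored headless and the page itself acts as the
 * allocation, with no z3fold header at all.
 */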
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
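
/*
 * A minimal sketch of an eviction handler obeying the contract above.
 * writeback_object() is a hypothetical helper standing in for whatever the
 * user (e.g. zswap) does to move the data out; on success the handler must
 * call z3fold_free() itself and return 0, otherwise it returns nonzero so
 * reclaim moves on to the next LRU page:
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (!writeback_object(handle))
 *			return -EAGAIN;
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 */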
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
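
/*
 * Mapping offsets, illustrated (assuming 4K pages and CHUNK_SIZE == 64):
 * FIRST maps right after the header at offset 64; MIDDLE maps at
 * start_middle << 6; a LAST buddy of 5 chunks maps at
 * 4096 - (5 << 6) == 3776, i.e. flush with the end of the page.
 */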
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");