/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;
static const struct block_device_operations zram_devops;
static const struct block_device_operations zram_wb_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);
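
/*
 * Each zram table entry embeds its own lock bit (ZRAM_LOCK) in the flags
 * word, so per-slot serialization costs no extra memory; the helpers below
 * wrap bit_spin_lock() on that bit.
 */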
static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}
/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}
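
/*
 * The low ZRAM_FLAG_SHIFT bits of the flags word hold the compressed
 * object size; the zram_pageflags live in the bits above them. The two
 * accessors below pack and unpack the size portion.
 */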
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
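
/*
 * Raise stats.max_used_pages to at least @pages without taking a lock;
 * the cmpxchg loop retries until the stored maximum is current.
 */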
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}
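
/*
 * Detect a page filled with a single repeating machine word (e.g. an
 * all-zero page). Such pages are not compressed at all; only the fill word
 * is stored and the slot is flagged ZRAM_SAME by the write path. On
 * success the word is returned in *element.
 */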
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
/*
 * Mark all pages which are older than or equal to cutoff as IDLE.
 * Callers should hold the zram init lock in read mode.
 */
static void mark_idle(struct zram *zram, ktime_t cutoff)
{
	int is_idle = 1;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race.
		 * See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
			is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
#endif
			if (is_idle)
				zram_set_flag(zram, index, ZRAM_IDLE);
		}
		zram_slot_unlock(zram, index);
	}
}
static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	ktime_t cutoff_time = 0;
	ssize_t rv = -EINVAL;

	if (!sysfs_streq(buf, "all")) {
		/*
		 * If it did not parse as 'all', try to treat it as an
		 * integer when we have memory tracking enabled.
		 */
		u64 age_sec;

		if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
			cutoff_time = ktime_sub(ktime_get_boottime(),
					ns_to_ktime(age_sec * NSEC_PER_SEC));
		else
			goto out;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram))
		goto out_unlock;

	/* A cutoff_time of 0 marks everything as idle, this is the "all" behavior */
	mark_idle(zram, cutoff_time);
	rv = len;

out_unlock:
	up_read(&zram->init_lock);
out:
	return rv;
}
#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}
static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram->backing_dev)
		return;

	bdev = zram->bdev;
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all pending IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->bdev = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Support only block device in this moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = blkdev_get_by_dev(inode->i_rdev,
			FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (IS_ERR(bdev)) {
		err = PTR_ERR(bdev);
		bdev = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	reset_bdev(zram);

	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	/*
	 * With the writeback feature, zram does asynchronous IO, so it is no
	 * longer a synchronous device and the synchronous io flag must be
	 * dropped. Otherwise, an upper layer (e.g., swap) could wait for IO
	 * completion instead of submit-and-return, which makes the system
	 * sluggish. Furthermore, when the IO function returns (e.g.,
	 * swap_readpage), the upper layer assumes the IO is done and may
	 * free the page while the IO is still in flight, causing a
	 * use-after-free once the IO really completes.
	 */
	zram->disk->fops = &zram_wb_devops;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
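
/*
 * Blocks on the backing device are tracked with a simple bitmap:
 * alloc_block_bdev() claims the first free slot (index 0 is reserved so a
 * valid block index can never look like a NULL handle) and
 * free_block_bdev() releases it.
 */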
static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a block index is never confused with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}
static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}
/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(zram->bdev, 1, parent ? parent->bi_opf : REQ_OP_READ,
			GFP_NOIO);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent)
		bio->bi_end_io = zram_page_end_io;
	else
		bio_chain(bio, parent);

	submit_bio(bio);
	return 1;
}
#define PAGE_WB_SIG "page_index="

#define PAGE_WRITEBACK	0
#define HUGE_WRITEBACK	(1 << 0)
#define IDLE_WRITEBACK	(1 << 1)
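
/*
 * writeback_store() accepts "idle", "huge", "huge_idle", or
 * "page_index=<n>" and writes the matching slots out to the backing
 * device, converting them to ZRAM_WB entries that record the backing
 * block index in the slot's element field.
 */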
static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index = 0;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode, err;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "huge_idle"))
		mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
	else {
		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
			return -EINVAL;

		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
				index >= nr_pages)
			return -EINVAL;

		nr_pages = 1;
		mode = PAGE_WRITEBACK;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (; nr_pages != 0; index++, nr_pages--) {
		struct bio_vec bvec;

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;

		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode & IDLE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode & HUGE_WRITEBACK &&
			  !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty;
		 * zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to close a race with hugepage writeback */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_bvec_read(zram, &bvec, index, 0, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, zram->bdev, &bio_vec, 1,
			 REQ_OP_WRITE | REQ_SYNC);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		bio_add_page(&bio, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset);
		/*
		 * XXX: single-page IO is inefficient for writes, but it
		 * is acceptable as a starting point.
		 */
		err = submit_bio_wait(&bio);
		if (err) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			/*
			 * Return the last IO error even if later IOs
			 * succeed.
			 */
			ret = err;
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock so we need to check if the slot
		 * was changed. If the slot was freed, we can catch that
		 * easily with zram_allocated().
		 * A subtle case is a slot that was freed, reallocated and
		 * marked ZRAM_IDLE again. To close that race, idle_store()
		 * does not mark a slot ZRAM_IDLE while it is ZRAM_UNDER_WB;
		 * checking the ZRAM_IDLE bit here therefore suffices.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}
struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
	struct bio_vec bvec;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
 * The block layer wants only one ->submit_bio to be active at a time, so
 * chaining IO to a parent submitted from the same context would deadlock.
 * To avoid that, do the read from a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.bvec = *bvec;
	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}
static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.');

		if (count <= copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}
static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages),
			(u64)atomic64_read(&zram->stats.huge_pages_since));
	up_read(&zram->init_lock);

	return ret;
}
#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
#endif
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, which marks the
 * entry as in use.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
	zram->table[index].ac_time = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}
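
/*
 * Read the slot at @index into @page: slots written back to the backing
 * device are re-read from it, same-filled slots are reconstructed with
 * zram_fill_page(), huge (PAGE_SIZE) slots are copied as-is, and
 * everything else is decompressed.
 */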
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	struct zcomp_strm *zstrm;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	int ret;

	zram_slot_lock(zram, index);
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		struct bio_vec bvec;

		zram_slot_unlock(zram, index);

		bvec.bv_page = page;
		bvec.bv_len = PAGE_SIZE;
		bvec.bv_offset = 0;
		return read_from_bdev(zram, &bvec,
				zram_get_element(zram, index),
				bio, partial_io);
	}

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		zram_slot_unlock(zram, index);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE)
		zstrm = zcomp_stream_get(zram->comp);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *src = kmap_atomic(page);

		memcpy_to_bvec(bvec, src + offset);
		kunmap_atomic(src);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
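
/*
 * Compress @bvec's page and store it at @index. Same-filled pages store
 * only the fill word; pages that compress to huge_class_size or more are
 * stored uncompressed and flagged ZRAM_HUGE.
 */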
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;

	handle = zs_malloc(zram->mem_pool, comp_len,
			__GFP_KSWAPD_RECLAIM |
			__GFP_NOWARN |
			__GFP_HIGHMEM |
			__GFP_MOVABLE);

	if (unlikely(!handle)) {
		zcomp_stream_put(zram->comp);
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
		atomic64_inc(&zram->stats.huge_pages_since);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		dst = kmap_atomic(page);
		memcpy_from_bvec(dst + offset, bvec);
		kunmap_atomic(dst);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);
	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because logical
	 * block size isn't identical with physical block size on some arch,
	 * we could get a discard request pointing to a specific offset
	 * within a certain physical block. Although we can handle this
	 * request by reading that physical block and decompressing and
	 * partially zeroing and re-compressing and then re-storing it, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
/*
 * Returns a negative errno on error; otherwise returns 0 or 1.
 * Returns 0 if the IO request was completed synchronously.
 * Returns 1 if the IO request was successfully submitted (asynchronously).
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, unsigned int op, struct bio *bio)
{
	int ret;

	if (!op_is_write(op)) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	zram_slot_lock(zram, index);
	zram_accessed(zram, index);
	zram_slot_unlock(zram, index);

	if (unlikely(ret < 0)) {
		if (!op_is_write(op))
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
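
/*
 * Walk the bio segment by segment, splitting each one into chunks that do
 * not cross a PAGE_SIZE boundary, and service every chunk with
 * zram_bvec_rw().
 */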
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long start_time;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	start_time = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					bio_op(bio), bio) < 0) {
				bio->bi_status = BLK_STS_IOERR;
				break;
			}

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}
	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_submit_bio(struct bio *bio)
{
	struct zram *zram = bio->bi_bdev->bd_disk->private_data;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		bio_io_error(bio);
		return;
	}

	__zram_make_request(zram, bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	atomic64_inc(&zram->stats.notify_free);
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}

	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;
	unsigned long start_time;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	start_time = bdev_start_io_acct(bdev->bd_disk->part0,
			SECTORS_PER_PAGE, op, jiffies);
	ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
	bdev_end_io_acct(bdev->bd_disk->part0, op, start_time);
out:
	/*
	 * If the I/O fails, just return the error (i.e., non-zero) without
	 * calling page_endio. The callers of rw_page (e.g., swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error (e.g., SetPageError,
	 * set_page_dirty and other work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, op_is_write(op), 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}
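
/*
 * Tear down an initialized device: free the table and the zsmalloc pool,
 * destroy the compressor, zero the stats, and detach any backing device.
 */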
static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity_and_notify(zram->disk, 0);
	part_stat_set_all(zram->disk->part0, 0);

	/* All I/O on every CPU has completed, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);

	up_write(&zram->init_lock);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct gendisk *disk;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	disk = zram->disk;

	mutex_lock(&disk->open_mutex);
	/* Do not reset an active device or claimed device */
	if (disk_openers(disk) || zram->claim) {
		mutex_unlock(&disk->open_mutex);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&disk->open_mutex);

	/* Make sure all the pending I/O are finished */
	sync_blockdev(disk->part0);
	zram_reset_device(zram);

	mutex_lock(&disk->open_mutex);
	zram->claim = false;
	mutex_unlock(&disk->open_mutex);

	return len;
}
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_disk->open_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset, so open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}
static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

#ifdef CONFIG_ZRAM_WRITEBACK
static const struct block_device_operations zram_wb_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
#endif
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
	NULL,
};

ATTRIBUTE_GROUPS(zram_disk);
/*
 * Allocate and initialize a new zram device. Returns the device_id
 * ('>= 0') upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
	spin_lock_init(&zram->wb_limit_lock);
#endif

	/* gendisk structure */
	zram->disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->minors = 1;
	zram->disk->flags |= GENHD_FL_NO_PART;
	zram->disk->fops = &zram_devops;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);

	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size (PAGE_SIZE). But if it
	 * is different, we will skip discarding some parts of logical blocks
	 * in the part of the request range which isn't aligned to physical
	 * block size. So we can't ensure that all discarded logical blocks
	 * are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
	if (ret)
		goto out_cleanup_disk;

	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_cleanup_disk:
	blk_cleanup_disk(zram->disk);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}
static int zram_remove(struct zram *zram)
{
	bool claimed;

	mutex_lock(&zram->disk->open_mutex);
	if (disk_openers(zram->disk)) {
		mutex_unlock(&zram->disk->open_mutex);
		return -EBUSY;
	}

	claimed = zram->claim;
	if (!claimed)
		zram->claim = true;
	mutex_unlock(&zram->disk->open_mutex);

	zram_debugfs_unregister(zram);

	if (claimed) {
		/*
		 * If we were claimed by reset_store(), del_gendisk() will
		 * wait until reset_store() is done, so there is nothing
		 * to do.
		 */
		;
	} else {
		/* Make sure all the pending I/O are finished */
		sync_blockdev(zram->disk->part0);
		zram_reset_device(zram);
	}

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);

	/* del_gendisk drains pending reset_store */
	WARN_ON_ONCE(claimed && zram->claim);

	/*
	 * disksize_store() may be called in between zram_reset_device()
	 * and del_gendisk(), so run the last reset to avoid leaking
	 * anything allocated with disksize_store()
	 */
	zram_reset_device(zram);

	blk_cleanup_disk(zram->disk);
	kfree(zram);
	return 0;
}
/* zram-control sysfs attributes */

/*
 * NOTE: hot_add is not the usual read-only sysfs attribute, in the sense
 * that reading from this file does alter the state of your system: it
 * creates a new uninitialized zram device and returns that device's
 * device_id (or an error code if it fails to create a new device).
 */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);
static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_groups	= zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
	WARN_ON_ONCE(zram_remove(ptr));
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");