/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

/*
 * Pages that compress to a size equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static void zram_free_page(struct zram *zram, size_t index);
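/*
 * zram keeps no separate lock array: the ZRAM_LOCK bit inside each
 * table entry's ->value word serves as a per-entry bit spinlock, so
 * locking a slot is just locking a bit of its own metadata.
 */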
static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].value);
}
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return (zram->table[index].value >> (ZRAM_FLAG_SHIFT + 1)) ||
					zram->table[index].handle;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}
/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
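/*
 * A rough sketch of the ->value layout implied by the helpers above
 * (zram_drv.h holds the authoritative definition):
 *
 *   bits [0, ZRAM_FLAG_SHIFT)    compressed object size
 *   bits [ZRAM_FLAG_SHIFT, ...)  ZRAM_LOCK and the other zram_pageflags
 */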
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
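/*
 * Typical usage, assuming the device is zram0 (memparse() accepts
 * K/M/G suffixes, and a limit of 0 disables the check):
 *
 *   echo 1G > /sys/block/zram0/mem_limit
 *   echo 0 > /sys/block/zram0/mem_limit
 */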
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
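/*
 * Only "0" is accepted here; it re-arms the peak counter at the
 * current pool size, e.g.:
 *
 *   echo 0 > /sys/block/zram0/mem_used_max
 */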
#ifdef CONFIG_ZRAM_WRITEBACK
static bool zram_wb_enabled(struct zram *zram)
{
	return zram->backing_dev;
}

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram_wb_enabled(zram))
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hope filp_close flushes all outstanding IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_SYNCHRONOUS_IO;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}
static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram_wb_enabled(zram)) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}
static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, len);

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0)
		goto out;

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);
	spin_lock_init(&zram->bitmap_lock);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	/*
	 * With the writeback feature, zram does asynchronous IO, so it is no
	 * longer a synchronous device and the synchronous IO flag must be
	 * dropped. Otherwise, an upper layer (e.g., swap) could wait for IO
	 * completion rather than submit-and-return, making the whole system
	 * sluggish. Worse, when an IO function such as swap_readpage()
	 * returns, the upper layer assumes the IO is done and may free the
	 * page while the IO is actually still in flight, which ends in a
	 * use-after-free once the IO really completes.
	 */
	zram->disk->queue->backing_dev_info->capabilities &=
			~BDI_CAP_SYNCHRONOUS_IO;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
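/*
 * Typical usage: point an un-initialized zram device at a spare block
 * device before setting disksize, e.g.:
 *
 *   echo /dev/sda5 > /sys/block/zram0/backing_dev
 *   echo 1G > /sys/block/zram0/disksize
 */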
static unsigned long get_entry_bdev(struct zram *zram)
{
	unsigned long entry;

	spin_lock(&zram->bitmap_lock);
	/* skip bit 0, so a valid entry is never confused with a 0 handle */
	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
	if (entry == zram->nr_pages) {
		spin_unlock(&zram->bitmap_lock);
		return 0;
	}

	set_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);

	return entry;
}

static void put_entry_bdev(struct zram *zram, unsigned long entry)
{
	int was_set;

	spin_lock(&zram->bitmap_lock);
	was_set = test_and_clear_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);
	WARN_ON_ONCE(!was_set);
}
static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}
struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &bvec, entry, bio);
}

/*
 * The block layer wants only one ->make_request_fn to be active at a
 * time, so chaining IO to the parent IO from the same context would
 * deadlock. To avoid that, the read is issued from a worker thread.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif
static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}

static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
					u32 index, struct bio *parent,
					unsigned long *pentry)
{
	struct bio *bio;
	unsigned long entry;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	entry = get_entry_bdev(zram);
	if (!entry) {
		bio_put(bio);
		return -ENOSPC;
	}

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
					bvec->bv_offset)) {
		bio_put(bio);
		put_entry_bdev(zram, entry);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	*pentry = entry;

	return 0;
}
static void zram_wb_clear(struct zram *zram, u32 index)
{
	unsigned long entry;

	zram_clear_flag(zram, index, ZRAM_WB);
	entry = zram_get_element(zram, index);
	zram_set_element(zram, index, 0);
	put_entry_bdev(zram, entry);
}

#else
static bool zram_wb_enabled(struct zram *zram) { return false; }
static inline void reset_bdev(struct zram *zram) {};
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
					u32 index, struct bio *parent,
					unsigned long *pentry)
{
	return -EIO;
}

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}

static void zram_wb_clear(struct zram *zram, u32 index) {}
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram->table[index].ac_time = ktime_get_boottime();
}

static void zram_reset_access(struct zram *zram, u32 index)
{
	zram->table[index].ac_time = 0;
}
static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.');

		if (count < copied) {
			zram_slot_unlock(zram, index);
			break;
		}

		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}
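/*
 * Each line of the block_state output is "<index> <ac_time> <flags>",
 * where the flags are 's' (same-filled), 'w' (written back) and
 * 'h' (huge, i.e. incompressible), e.g.:
 *
 *   300    75.033841 .wh
 *   301    63.806904 s..
 */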
static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index) {};
static void zram_reset_access(struct zram *zram, u32 index) {};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif
/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    deprecation cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}
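/*
 * Usage: reading lists the available algorithms with the active one in
 * brackets; writing selects a new one (only before disksize is set):
 *
 *   cat /sys/block/zram0/comp_algorithm
 *   lzo [lz4] deflate
 *   echo lzo > /sys/block/zram0/comp_algorithm
 */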
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}
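/*
 * Any write to an initialized device triggers compaction of the
 * zsmalloc pool, e.g.:
 *
 *   echo 1 > /sys/block/zram0/compact
 */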
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
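/*
 * io_stat columns, in the order of the format string above:
 * failed_reads failed_writes invalid_io notify_free
 */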
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted,
			(u64)atomic64_read(&zram->stats.huge_pages));
	up_read(&zram->init_lock);

	return ret;
}
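/*
 * mm_stat columns, in the order of the format string above:
 * orig_data_size compr_data_size mem_used_total mem_limit
 * mem_used_max same_pages pages_compacted huge_pages
 * (sizes are in bytes; the last three are page counts)
 */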
static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);

	return true;
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold the entry's bit_spinlock to indicate that the entry
 * is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

	zram_reset_access(zram, index);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
		zram_wb_clear(zram, index);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, 0);
		atomic64_dec(&zram->stats.same_pages);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
}
static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	if (zram_wb_enabled(zram)) {
		zram_slot_lock(zram, index);
		if (zram_test_flag(zram, index, ZRAM_WB)) {
			struct bio_vec bvec;

			zram_slot_unlock(zram, index);

			bvec.bv_page = page;
			bvec.bv_len = PAGE_SIZE;
			bvec.bv_offset = 0;
			return read_from_bdev(zram, &bvec,
					zram_get_element(zram, index),
					bio, partial_io);
		}
		zram_slot_unlock(zram, index);
	}

	zram_slot_lock(zram, index);
	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		zram_slot_unlock(zram, index);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;
	bool allow_wb = true;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len >= huge_class_size)) {
		comp_len = PAGE_SIZE;
		if (zram_wb_enabled(zram) && allow_wb) {
			zcomp_stream_put(zram->comp);
			ret = write_to_bdev(zram, bvec, index, bio, &element);
			if (!ret) {
				flags = ZRAM_WB;
				ret = 1;
				goto out;
			}
			allow_wb = false;
			goto compress_again;
		}
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a
	 * specific offset within a certain physical block. Although we can
	 * handle this request by reading that physical block, decompressing,
	 * partially zeroing, re-compressing and re-storing it, this isn't
	 * reasonable because our intent with a discard request is to save
	 * memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
/*
 * Returns an errno on error. Otherwise returns 0 or 1:
 * 0 if the IO request was completed synchronously,
 * 1 if the IO request was successfully submitted (asynchronously).
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write, struct bio *bio)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	struct request_queue *q = zram->disk->queue;
	int ret;

	generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);

	zram_slot_lock(zram, index);
	zram_accessed(zram, index);
	zram_slot_unlock(zram, index);

	if (unlikely(ret < 0)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio)), bio) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
	/*
	 * If the I/O fails, just return the error (i.e., non-zero) without
	 * calling page_endio. The callers of rw_page (e.g., swap_readpage,
	 * __swap_writepage) then resubmit the I/O as a bio request, and
	 * bio->bi_end_io handles the error (e.g., SetPageError,
	 * set_page_dirty and extra work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, is_write, 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(ret);
	}

	return ret;
}
static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* All in-flight I/O on all CPUs is done, so it's safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	revalidate_disk(zram->disk);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}
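/*
 * Setting disksize is what actually initializes the device, e.g.:
 *
 *   echo 1G > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0
 */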
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	revalidate_disk(zram->disk);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}
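/*
 * Writing any non-zero value resets an idle device back to the
 * un-initialized state (the device must not be in use):
 *
 *   swapoff /dev/zram0
 *   echo 1 > /sys/block/zram0/reset
 */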
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed for reset, so fail the open request */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
#endif
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static const struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
/*
 * Allocate and initialize a new zram device. The function returns
 * a '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE-aligned
	 * and n*PAGE_SIZE-sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if the logical
	 * block size is identical to the physical block size (PAGE_SIZE).
	 * But if it is different, we will skip discarding some parts of
	 * logical blocks in the part of the request range which isn't
	 * aligned to physical block size. So we can't ensure that all
	 * discarded logical blocks are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	zram->disk->queue->backing_dev_info->capabilities |=
			(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
				device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}
static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	zram_debugfs_unregister(zram);
	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices. This also helps during
	 * hot_remove -- zram_reset_device() is the last holder of
	 * ->init_lock, no later/concurrent disksize_store() or any
	 * other sysfs handlers are possible.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);
	blk_cleanup_queue(zram->disk->queue);
	put_disk(zram->disk);

	kfree(zram);
	return 0;
}
/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in
 * the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns back
 * this device's device_id (or an error code if it fails to create a
 * new device).
 */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);
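/*
 * Reading hot_add allocates the next free device and prints its id:
 *
 *   cat /sys/class/zram-control/hot_add
 *   1
 */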
static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);
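/*
 * Writing a device id removes that (idle) device:
 *
 *   echo 1 > /sys/class/zram-control/hot_remove
 */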
static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_groups	= zram_control_class_groups,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}
static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
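/*
 * num_devices is given at module load time; additional devices can be
 * added later through the zram-control class, e.g.:
 *
 *   modprobe zram num_devices=4
 */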
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");