/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/cpuhotplug.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static void zram_free_page(struct zram *zram, size_t index);

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].value &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
			u32 index, size_t size)
{
	unsigned long flags = zram->table[index].value >> ZRAM_FLAG_SHIFT;

	zram->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

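/*
 * Illustration of the ->value packing above, assuming ZRAM_FLAG_SHIFT
 * is 24 (its value in zram_drv.h of this era): a 100-byte compressed
 * object with all flags clear is stored as
 *
 *	value = (0 << 24) | 100
 *
 * i.e. the low ZRAM_FLAG_SHIFT bits always hold the compressed object
 * size and the bits above them hold the enum zram_pageflags bits.
 */
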
#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static void zram_revalidate_disk(struct zram *zram)
{
	revalidate_disk(zram->disk);
	/* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
	zram->disk->queue->backing_dev_info->capabilities |=
		BDI_CAP_STABLE_WRITES;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return false;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return false;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return false;

	/* I/O request is valid */
	return true;
}

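/*
 * Worked example, assuming ZRAM_LOGICAL_BLOCK_SIZE is 4096 bytes (eight
 * 512-byte sectors, as in zram_drv.h): a request starting at sector 9
 * fails the first check (9 & 7 != 0) and a 1024-byte request fails the
 * second (1024 & 4095 != 0); only logical-block-aligned I/O that also
 * fits below disksize is accepted.
 */
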
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	*index += (*offset + bvec->bv_len) / PAGE_SIZE;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

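/*
 * Note on the loop above: this is the usual lock-free "raise the
 * maximum" pattern. The cmpxchg() only stores the new value if nobody
 * else changed max_used_pages in the meantime; otherwise old_max is
 * reloaded and the comparison retried, so no spinlock is needed on
 * this hot path.
 */
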
static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned int pos;
	unsigned long *page;
	unsigned long val;

	page = (unsigned long *)ptr;
	val = page[0];

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

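/*
 * Typical usage (memparse() accepts k/m/g suffixes):
 *
 *	echo 256M > /sys/block/zram0/mem_limit
 *	echo 0 > /sys/block/zram0/mem_limit	# 0 disables the limit
 */
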
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}

#ifdef CONFIG_ZRAM_WRITEBACK
static bool zram_wb_enabled(struct zram *zram)
{
	return zram->backing_dev;
}

static void reset_bdev(struct zram *zram)
{
	struct block_device *bdev;

	if (!zram_wb_enabled(zram))
		return;

	bdev = zram->bdev;
	if (zram->old_block_size)
		set_blocksize(bdev, zram->old_block_size);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	/* hopefully filp_close() flushes all outstanding I/O */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->old_block_size = 0;
	zram->bdev = NULL;

	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct file *file = zram->backing_dev;
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (!zram_wb_enabled(zram)) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz, old_block_size = 0;
	unsigned long nr_pages, *bitmap = NULL;
	struct block_device *bdev = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strlcpy(file_name, buf, len);

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev = bdgrab(I_BDEV(inode));
	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
	if (err < 0)
		goto out;

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	old_block_size = block_size(bdev);
	err = set_blocksize(bdev, PAGE_SIZE);
	if (err)
		goto out;

	reset_bdev(zram);
	spin_lock_init(&zram->bitmap_lock);

	zram->old_block_size = old_block_size;
	zram->bdev = bdev;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	if (bitmap)
		kvfree(bitmap);

	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}

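/*
 * Typical usage (CONFIG_ZRAM_WRITEBACK, before the device is
 * initialized):
 *
 *	echo /dev/sda5 > /sys/block/zram0/backing_dev
 *
 * Pages that compress poorly (comp_len > max_zpage_size, see
 * __zram_bvec_write() below) are then written to the backing device
 * instead of being stored uncompressed in memory.
 */
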
static unsigned long get_entry_bdev(struct zram *zram)
{
	unsigned long entry;

	spin_lock(&zram->bitmap_lock);
	/* skip bit 0, so a valid entry is never confused with zram.handle == 0 */
	entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
	if (entry == zram->nr_pages) {
		spin_unlock(&zram->bitmap_lock);
		return 0;
	}

	set_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);

	return entry;
}

static void put_entry_bdev(struct zram *zram, unsigned long entry)
{
	int was_set;

	spin_lock(&zram->bitmap_lock);
	was_set = test_and_clear_bit(entry, zram->bitmap);
	spin_unlock(&zram->bitmap_lock);
	WARN_ON_ONCE(!was_set);
}

static void zram_page_end_io(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	page_endio(page, op_is_write(bio_op(bio)),
			blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

/*
 * Returns 1 if the submission is successful.
 */
static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
		bio_put(bio);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_READ;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	return 1;
}

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct bio *bio;
};

#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;

	read_from_bdev_async(zram, &bvec, entry, bio);
}

/*
 * The block layer wants one ->make_request_fn to be active at a time,
 * so if we use chained I/O with a parent I/O in the same context,
 * it's a deadlock. To avoid that, we do the read from worker-thread
 * context instead.
 */
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	struct zram_work work;

	work.zram = zram;
	work.entry = entry;
	work.bio = bio;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return 1;
}
#else
static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
				unsigned long entry, struct bio *bio)
{
	WARN_ON(1);
	return -EIO;
}
#endif

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	if (sync)
		return read_from_bdev_sync(zram, bvec, entry, parent);
	else
		return read_from_bdev_async(zram, bvec, entry, parent);
}

static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *parent,
				unsigned long *pentry)
{
	struct bio *bio;
	unsigned long entry;

	bio = bio_alloc(GFP_ATOMIC, 1);
	if (!bio)
		return -ENOMEM;

	entry = get_entry_bdev(zram);
	if (!entry) {
		bio_put(bio);
		return -ENOSPC;
	}

	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	bio_set_dev(bio, zram->bdev);
	if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
					bvec->bv_offset)) {
		bio_put(bio);
		put_entry_bdev(zram, entry);
		return -EIO;
	}

	if (!parent) {
		bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
		bio->bi_end_io = zram_page_end_io;
	} else {
		bio->bi_opf = parent->bi_opf;
		bio_chain(bio, parent);
	}

	submit_bio(bio);
	*pentry = entry;

	return 0;
}

static void zram_wb_clear(struct zram *zram, u32 index)
{
	unsigned long entry;

	zram_clear_flag(zram, index, ZRAM_WB);
	entry = zram_get_element(zram, index);
	zram_set_element(zram, index, 0);
	put_entry_bdev(zram, entry);
}

#else
static bool zram_wb_enabled(struct zram *zram) { return false; }
static inline void reset_bdev(struct zram *zram) {}
static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *parent,
				unsigned long *pentry)
{
	return -EIO;
}

static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
			unsigned long entry, struct bio *parent, bool sync)
{
	return -EIO;
}
static void zram_wb_clear(struct zram *zram, u32 index) {}
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layout change
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	char compressor[ARRAY_SIZE(zram->compressor)];
	size_t sz;

	strlcpy(compressor, buf, sizeof(compressor));
	/* ignore trailing newline */
	sz = strlen(compressor);
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor))
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	strcpy(zram->compressor, compressor);
	up_write(&zram->init_lock);
	return len;
}

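/*
 * Typical usage; reading the attribute lists the available backends
 * (which ones exist depends on the kernel config) with the active one
 * in brackets:
 *
 *	cat /sys/block/zram0/comp_algorithm
 *	[lzo] lz4 deflate
 *	echo lz4 > /sys/block/zram0/comp_algorithm
 */
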
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

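/*
 * mm_stat below prints, in this order (see
 * Documentation/blockdev/zram.txt):
 *
 *	orig_data_size compr_data_size mem_used_total mem_limit
 *	mem_used_max same_pages pages_compacted
 */
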
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted);
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}

static bool zram_same_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	zram_slot_lock(zram, index);
	if (unlikely(!zram_get_handle(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME))) {
		void *mem;

		zram_slot_unlock(zram, index);
		mem = kmap_atomic(page);
		zram_fill_page(mem + offset, len,
				zram_get_element(zram, index));
		kunmap_atomic(mem);
		return true;
	}
	zram_slot_unlock(zram, index);

	return false;
}

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock to indicate that
 * this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

	if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
		zram_wb_clear(zram, index);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		zram_set_element(zram, index, 0);
		atomic64_dec(&zram->stats.same_pages);
		atomic64_dec(&zram->stats.pages_stored);
		return;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
}

static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
				struct bio *bio, bool partial_io)
{
	int ret;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;

	if (zram_wb_enabled(zram)) {
		zram_slot_lock(zram, index);
		if (zram_test_flag(zram, index, ZRAM_WB)) {
			struct bio_vec bvec;

			zram_slot_unlock(zram, index);

			bvec.bv_page = page;
			bvec.bv_len = PAGE_SIZE;
			bvec.bv_offset = 0;
			return read_from_bdev(zram, &bvec,
					zram_get_element(zram, index),
					bio, partial_io);
		}
		zram_slot_unlock(zram, index);
	}

	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
		return 0;

	zram_slot_lock(zram, index);
	handle = zram_get_handle(zram, index);
	size = zram_get_obj_size(zram, index);

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);

		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, handle);
	zram_slot_unlock(zram, index);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
	if (unlikely(ret))
		goto out;

	if (is_partial_io(bvec)) {
		void *dst = kmap_atomic(bvec->bv_page);
		void *src = kmap_atomic(page);

		memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
		kunmap_atomic(src);
		kunmap_atomic(dst);
	}
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, struct bio *bio)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;
	bool allow_wb = true;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comp);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (unlikely(comp_len > max_zpage_size)) {
		if (zram_wb_enabled(zram) && allow_wb) {
			zcomp_stream_put(zram->comp);
			ret = write_to_bdev(zram, bvec, index, bio, &element);
			if (!ret) {
				flags = ZRAM_WB;
				ret = 1;
				goto out;
			}
			allow_wb = false;
			goto compress_again;
		}
		comp_len = PAGE_SIZE;
	}

	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
		zcomp_stream_put(zram->comp);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
			goto compress_again;
		return -ENOMEM;
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comp);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page = NULL;
	void *src;
	struct bio_vec vec;

	vec = *bvec;
	if (is_partial_io(bvec)) {
		void *dst;
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;

		ret = __zram_bvec_read(zram, page, index, bio, true);
		if (ret)
			goto out;

		src = kmap_atomic(bvec->bv_page);
		dst = kmap_atomic(page);
		memcpy(dst + offset, src + bvec->bv_offset, bvec->bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		vec.bv_page = page;
		vec.bv_len = PAGE_SIZE;
		vec.bv_offset = 0;
	}

	ret = __zram_bvec_write(zram, &vec, index, bio);
out:
	if (is_partial_io(bvec))
		__free_page(page);

	return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we can handle this
	 * request by reading that physical block and decompressing and
	 * partially zeroing and re-compressing and then re-storing it, this
	 * isn't reasonable because our intent with a discard request is to
	 * save memory. So skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

/*
 * Returns an errno if there is a problem. Otherwise returns 0 or 1:
 * Returns 0 if the IO request was done synchronously.
 * Returns 1 if the IO request was successfully submitted.
 */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, bool is_write, struct bio *bio)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	struct request_queue *q = zram->disk->queue;
	int ret;

	generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		flush_dcache_page(bvec->bv_page);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset, bio);
	}

	generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret < 0)) {
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
			(SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio);
		return;
	default:
		break;
	}

	bio_for_each_segment(bvec, bio, iter) {
		struct bio_vec bv = bvec;
		unsigned int unwritten = bvec.bv_len;

		do {
			bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
							unwritten);
			if (zram_bvec_rw(zram, &bv, index, offset,
					op_is_write(bio_op(bio)), bio) < 0)
				goto out;

			bv.bv_offset += bv.bv_len;
			unwritten -= bv.bv_len;

			update_position(&index, &offset, &bv);
		} while (unwritten);
	}

	bio_endio(bio);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	return BLK_QC_T_NONE;

error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	zram_slot_lock(zram, index);
	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
	atomic64_inc(&zram->stats.notify_free);
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	int offset, ret;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	zram = bdev->bd_disk->private_data;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		ret = -EINVAL;
		goto out;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
	/*
	 * If I/O fails, just return an error (i.e. non-zero) without
	 * calling page_endio. That causes the upper layers of rw_page
	 * (e.g. swap_readpage, __swap_writepage) to resubmit the I/O as
	 * a bio request, whose bio->bi_end_io then handles the error
	 * (e.g. SetPageError, set_page_dirty and extra work).
	 */
	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		page_endio(page, is_write, 0);
		break;
	case 1:
		ret = 0;
		break;
	default:
		WARN_ON(1);
	}

	return ret;
}

static void zram_reset_device(struct zram *zram)
{
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	comp = zram->comp;
	disksize = zram->disksize;
	zram->disksize = 0;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, disksize);
	memset(&zram->stats, 0, sizeof(zram->stats));
	zcomp_destroy(comp);
	reset_bdev(zram);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	comp = zcomp_create(zram->compressor);
	if (IS_ERR(comp)) {
		pr_err("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_revalidate_disk(zram);
	up_write(&zram->init_lock);

	return len;

out_free_meta:
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

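/*
 * Typical usage: set a size, then use the device, e.g. as swap:
 *
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 */
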
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device or claimed device */
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	zram_revalidate_disk(zram);
	bdput(bdev);

	mutex_lock(&bdev->bd_mutex);
	zram->claim = false;
	mutex_unlock(&bdev->bd_mutex);

	return len;
}

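/*
 * Typical usage: the device must be idle (no openers), e.g.:
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 */
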
static int zram_open(struct block_device *bdev, fmode_t mode)
{
	int ret = 0;
	struct zram *zram;

	WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

	zram = bdev->bd_disk->private_data;
	/* zram was claimed to reset, so the open request fails */
	if (zram->claim)
		ret = -EBUSY;

	return ret;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
#endif

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	&dev_attr_debug_stat.attr,
	NULL,
};

static const struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

/*
 * Allocate and initialize a new zram device. The function returns a
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_err("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);

	/*
	 * To ensure that we always get PAGE_SIZE-aligned
	 * and n*PAGE_SIZE-sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	/*
	 * zram_bio_discard() will clear all logical blocks if the logical
	 * block size is identical with the physical block size (PAGE_SIZE).
	 * But if it is different, we will skip discarding some parts of
	 * logical blocks in the part of the request range which isn't
	 * aligned to the physical block size. So we can't ensure that all
	 * discarded logical blocks are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_err("Error creating sysfs group for device %d\n",
				device_id);
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	struct block_device *bdev;

	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	if (bdev->bd_openers || zram->claim) {
		mutex_unlock(&bdev->bd_mutex);
		bdput(bdev);
		return -EBUSY;
	}

	zram->claim = true;
	mutex_unlock(&bdev->bd_mutex);

	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices. This also helps during
	 * hot_remove -- zram_reset_device() is the last holder of
	 * ->init_lock, no later/concurrent disksize_store() or any
	 * other sysfs handlers are possible.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);
	bdput(bdev);

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	blk_cleanup_queue(zram->disk->queue);
	del_gendisk(zram->disk);
	put_disk(zram->disk);

	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute,
 * in the sense that reading from this file does alter the state of
 * your system -- it creates a new un-initialized zram device and
 * returns back this device's device_id (or an error code if it fails
 * to create a new device).
 */
static ssize_t hot_add_show(struct class *class,
			struct class_attribute *attr,
			char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static CLASS_ATTR_RO(hot_add);

static ssize_t hot_remove_store(struct class *class,
			struct class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);

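/*
 * Typical usage of the two attributes above:
 *
 *	cat /sys/class/zram-control/hot_add	# prints e.g. "1" and
 *						# creates /dev/zram1
 *	echo 1 > /sys/class/zram-control/hot_remove
 */
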
static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.owner		= THIS_MODULE,
	.class_groups	= zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

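/*
 * Example: pre-create four devices at load time:
 *
 *	modprobe zram num_devices=4
 */
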
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");