/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
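
/*
 * For illustration: ZRAM_ATTR_RO(num_reads) expands to a show-only sysfs
 * attribute that prints zram->stats.num_reads, roughly:
 *
 *	static ssize_t zram_attr_num_reads_show(struct device *d,
 *			struct device_attribute *attr, char *b)
 *	{
 *		struct zram *zram = dev_to_zram(d);
 *		return scnprintf(b, PAGE_SIZE, "%llu\n",
 *			(u64)atomic64_read(&zram->stats.num_reads));
 *	}
 *
 * The instantiations sit near the bottom of this file, next to the
 * zram_disk_attrs[] table.
 */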
static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	int ret;
	struct zram *zram = dev_to_zram(dev);

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}
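
/*
 * Usage sketch (not part of the driver): the number of compression
 * streams is tuned from userspace, e.g.
 *
 *	echo 4 > /sys/block/zram0/max_comp_streams
 *
 * For an already-initialized device the request is forwarded to
 * zcomp_set_max_streams(); otherwise it simply takes effect when the
 * backend is created in disksize_store().
 */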
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
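
/*
 * Usage sketch (not part of the driver): the backend must be chosen
 * before the device is sized, e.g.
 *
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 *
 * Once init_done() is true, the algorithm is fixed until a reset.
 */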
/* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
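
/*
 * Layout implied by the two helpers above: table[index].value packs the
 * compressed object size into bits [0, ZRAM_FLAG_SHIFT) and the
 * zram_pageflags (ZRAM_ZERO, plus ZRAM_ACCESS used as a per-entry
 * bit_spinlock) into the bits above ZRAM_FLAG_SHIFT.
 */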
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range request */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
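
/*
 * Worked example (illustration only, assuming the 4K logical block size
 * from zram_drv.h, i.e. ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio
 * starting at sector 13 fails the alignment mask (13 & 7 != 0), while
 * one at sector 16 for 8192 bytes spans sectors [16, 32) and is
 * accepted as long as 32 does not exceed disksize >> SECTOR_SHIFT.
 */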
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
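
/*
 * Example (illustration only, 4K pages): offset 3072 plus bv_len 2048
 * crosses the page boundary, so the index advances and the new offset is
 * (3072 + 2048) % 4096 = 1024; offset 0 plus bv_len 4096 advances the
 * index and leaves the offset at 0.
 */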
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table entry's bit_spinlock to indicate that the
 * entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
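
/*
 * Write-path summary: a partial bvec is first merged into a decompressed
 * copy of the page, zero-filled pages are recorded with only the
 * ZRAM_ZERO flag (no zsmalloc allocation), incompressible pages
 * (clen > max_zpage_size) are stored uncompressed as a full PAGE_SIZE
 * object, and everything else is stored compressed via zs_malloc().
 */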
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical
	 * block size isn't identical with physical block size on some arch,
	 * we could get a discard request pointing to a specific offset within
	 * a certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		index++;
		n -= PAGE_SIZE;
	}
}
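
/*
 * Example (illustration only, 4K pages): a 12K discard starting 1K into
 * page 5 skips the 3K head fragment (PAGE_SIZE - offset), frees pages 6
 * and 7 in the loop, and leaves the trailing 1K fragment alone; only
 * whole pages are ever released.
 */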
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}
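
/*
 * Usage sketch (not part of the driver): a device is typically brought
 * up as
 *
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * memparse() accepts the usual K/M/G suffixes, and the size is rounded
 * up to a whole number of pages by PAGE_ALIGN().
 */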
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram, true);
	bdput(bdev);

	return len;

out:
	bdput(bdev);
	return ret;
}
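
/*
 * Usage sketch (not part of the driver):
 *
 *	swapoff /dev/zram0
 *	echo 1 > /sys/block/zram0/reset
 *
 * The write is refused with -EBUSY while the device still has holders
 * (e.g. it is swapped on or mounted).
 */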
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else {
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;
		}

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
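
/*
 * Split example (illustration only, 4K pages): a 4K bvec arriving at
 * offset 2K within zram page N is handled as two calls: 2K at (N, 2K)
 * and the remaining 2K at (N + 1, 0). Since a bvec never exceeds
 * PAGE_SIZE, it spans at most two zram pages.
 */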
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}
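
/*
 * The swap layer invokes this through the swap_slot_free_notify hook in
 * zram_devops below whenever a swap slot backed by this device is freed,
 * letting zram drop the compressed copy immediately instead of waiting
 * for the slot to be overwritten.
 */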
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size (PAGE_SIZE). But if it
	 * is different, we will skip discarding some parts of logical blocks
	 * in the part of the request range which isn't aligned to physical
	 * block size. So we can't ensure that all discarded logical blocks
	 * are zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);
	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");