/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"
/* Globals */
static int zram_major;
static struct zram *zram_devices;
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
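/*
 * Typical userspace setup, for reference (see the zram document under
 * Documentation/ for the full walk-through):
 *
 *	modprobe zram num_devices=4
 *	echo 1G > /sys/block/zram0/disksize
 *	mkswap /dev/zram0 && swapon /dev/zram0
 *
 * The device can just as well host a filesystem (mkfs + mount).
 */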
static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", init_done(zram));
}
static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}
static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}
static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}
static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}
static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", (u64)atomic64_read(&zram->stats.zero_pages));
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_data_size));
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}
static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}
static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}
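/*
 * A bvec smaller than a full page forces a read-modify-write cycle:
 * the whole page is decompressed first and the sub-page range is
 * merged into it (see zram_bvec_read() and zram_bvec_write()).
 */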
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
			(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
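/*
 * zram_meta bundles the per-device state: the LZO scratch buffers, the
 * table mapping page index -> zsmalloc handle, and the zsmalloc pool
 * itself. zram_meta_free() releases it in the reverse order of the
 * allocations made in zram_meta_alloc().
 */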
static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	mutex_init(&meta->buffer_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
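/*
 * Reads of pages that were never written, or that were detected as
 * zero-filled on write, carry no zsmalloc allocation; they are served
 * by simply zeroing the caller's buffer.
 */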
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
/* NOTE: caller should hold meta->tb_lock with write-side */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}
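/*
 * Pages that compressed poorly are stored verbatim at PAGE_SIZE (see
 * the max_zpage_size check in the write path), so a stored size of
 * PAGE_SIZE means "copy as-is"; anything smaller goes through LZO.
 */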
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
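/*
 * Write path: a partial bvec is first merged into a decompressed copy
 * of the old page; fully zero pages are recorded with just the
 * ZRAM_ZERO flag; everything else is LZO-compressed into the
 * per-device scratch buffer and copied into a freshly allocated
 * zsmalloc object, with the table slot updated under the write side
 * of tb_lock.
 */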
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	mutex_lock(&meta->buffer_lock);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		/* user_mem is already unmapped (and NULL) for partial IO */
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}
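/*
 * Tear down an initialized device: drop every compressed page, free
 * the meta structure and zero the stats. @reset_capacity is false only
 * on module exit, where destroy_device() has already released the
 * gendisk and set_capacity() must not be called.
 */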
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}
static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	pr_debug("Initialization done!\n");
}
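/*
 * memparse() understands the usual K/M/G suffixes, so e.g.
 * "echo 1G > /sys/block/zram0/disksize" works; the value is then
 * rounded up to a page multiple by PAGE_ALIGN().
 */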
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}
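/*
 * bi_sector is in 512-byte sectors; SECTORS_PER_PAGE of them make up
 * one zram page, so the starting page index and the byte offset into
 * it fall out of a shift and a mask. A bvec that straddles a page
 * boundary is split in two, since zram_bvec_rw() operates on a single
 * zram page at a time.
 */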
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}
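/*
 * Called by the swap code when a swap slot on this device is freed:
 * the compressed copy can be dropped immediately instead of lingering
 * until the slot is overwritten.
 */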
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};
static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};
static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->meta = NULL;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}
static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}
static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}
module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");