/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);

static int zram_major;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
static inline void deprecated_attr_warn(const char *name)
{
	pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
			task_pid_nr(current),
			current->comm,
			name,
			"See zram documentation.");
}
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
									\
	deprecated_attr_warn(__stringify(name));			\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);
static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}
/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
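
/*
 * Worked example (illustrative only, not part of the driver):
 * table[index].value packs the object size into the low ZRAM_FLAG_SHIFT
 * bits and the zram_pageflags above them. Assuming ZRAM_FLAG_SHIFT is 24,
 * as commonly defined in zram_drv.h, storing a 1234-byte object would
 * behave as:
 *
 *	value = (flags << 24) | 1234;
 *	zram_get_obj_size(meta, index);           // value & (BIT(24) - 1) == 1234
 *	zram_test_flag(meta, index, ZRAM_ZERO);   // value & BIT(ZRAM_ZERO)
 *
 * The exact bit positions come from enum zram_pageflags in zram_drv.h;
 * the constants here are an assumption for illustration.
 */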
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram,
		sector_t start, unsigned int size)
{
	u64 end, bound;

	/* unaligned request */
	if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	end = start + (size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}
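
/*
 * Example (illustrative, assuming a 4096-byte ZRAM_LOGICAL_BLOCK_SIZE and
 * therefore ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8 with 512-byte sectors):
 * a request at sector 8 of size 4096 passes both alignment checks, while
 * a request at sector 9, or one of size 512, is rejected as unaligned
 * before the bounds check even runs.
 */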
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}
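
/*
 * Note (illustrative): the loop above is the standard lock-free
 * "update maximum" pattern. If another CPU raises max_used_pages between
 * the read and the atomic_long_cmpxchg(), the cmpxchg fails and returns
 * the newer value, which becomes cur_max on the next iteration; the loop
 * exits once the stored maximum is at least `pages` or the swap succeeds,
 * so no watermark update is ever lost.
 */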
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("orig_data_size");
	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_total");
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		val = zs_get_total_pages(meta->mem_pool);
	}
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_limit");
	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	deprecated_attr_warn("mem_used_max");
	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}
static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}
static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	unsigned long nr_migrated;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	meta = zram->meta;
	nr_migrated = zs_compact(meta->mem_pool);
	atomic64_add(nr_migrated, &zram->stats.num_migrated);
	up_read(&zram->init_lock);

	return len;
}
static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.invalid_io),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}
static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	down_read(&zram->init_lock);
	if (init_done(zram))
		mem_used = zs_get_total_pages(zram->meta->mem_pool);

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.zero_pages),
			(u64)atomic64_read(&zram->stats.num_migrated));
	up_read(&zram->init_lock);

	return ret;
}
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
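
/*
 * Illustrative userspace view (not part of the driver): the attributes
 * above surface the counters in struct zram_stats. Given the
 * mm_stat_show() format string, a hypothetical device might print:
 *
 *	$ cat /sys/block/zram0/mm_stat
 *	   65536     4726    12288        0    12288        4        0
 *
 * i.e. orig_data_size, compr_data_size, mem_used_total, mem_limit,
 * mem_used_max, zero_pages and num_migrated, in that order. The numbers
 * are invented for illustration.
 */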
static inline bool zram_meta_get(struct zram *zram)
{
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	atomic_dec(&zram->refcount);
}
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++) {
		unsigned long handle = meta->table[index].handle;

		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}
static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
{
	size_t num_pages;
	char pool_name[8];
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

	if (!meta)
		return NULL;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto out_error;
	}

	snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
	meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto out_error;
	}

	return meta;

out_error:
	vfree(meta->table);
	kfree(meta);
	return NULL;
}
/*
 * To protect concurrent access to the same index entry,
 * the caller should hold this table index entry's bit_spinlock to
 * indicate that this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		if (user_mem)
			kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}
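
/*
 * Note on the write path above: when the compressed length exceeds
 * max_zpage_size, clen is reset to PAGE_SIZE and the page is stored
 * uncompressed (the copy_page() branch), trading memory savings for a
 * cheap read later. For example, assuming a 4096-byte page and a
 * max_zpage_size of about 3/4 of PAGE_SIZE (its usual definition in
 * zram_drv.h, an assumption here), a page that only compresses down to
 * 3500 bytes would be stored as the full 4096 bytes instead.
 */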
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block. Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory. So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}
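
/*
 * Worked example (illustrative): with 4096-byte pages, a discard of
 * bi_size = 12288 bytes starting at byte offset 1024 within page `index`
 * first skips the leading partial page (n -= 4096 - 1024, index++), then
 * frees the next two whole pages in the loop (n: 9216 -> 5120 -> 1024),
 * and leaves the trailing 1024 bytes of the last page untouched. Only
 * whole zram pages are ever freed.
 */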
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset, rw;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only make operation on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}
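
/*
 * Worked example (illustrative): with 4096-byte pages, a 4096-byte bvec
 * arriving at byte offset 512 within page `index` exceeds
 * max_transfer_size (4096 - 512 = 3584), so it is split: the first 3584
 * bytes go to (index, offset 512) and the remaining 512 bytes to
 * (index + 1, offset 0), matching the two zram_bvec_rw() calls above.
 */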
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram_meta_get(zram)))
		goto error;

	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
					bio->bi_iter.bi_size)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto put_zram;
	}

	__zram_make_request(zram, bio);
	zram_meta_put(zram);
	return;
put_zram:
	zram_meta_put(zram);
error:
	bio_io_error(bio);
}
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	int offset, err = -EIO;
	u32 index;
	struct zram *zram;
	struct bio_vec bv;

	zram = bdev->bd_disk->private_data;
	if (unlikely(!zram_meta_get(zram)))
		goto out;

	if (!valid_io_request(zram, sector, PAGE_SIZE)) {
		atomic64_inc(&zram->stats.invalid_io);
		err = -EINVAL;
		goto put_zram;
	}

	index = sector >> SECTORS_PER_PAGE_SHIFT;
	offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

	bv.bv_page = page;
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
	zram_meta_put(zram);
out:
	/*
	 * If the I/O fails, just return the error (i.e. non-zero) without
	 * calling page_endio. The callers of rw_page (e.g. swap_readpage,
	 * __swap_writepage) will then resubmit the I/O as a bio request,
	 * and bio->bi_end_io handles the error
	 * (e.g. SetPageError, set_page_dirty and related work).
	 */
	if (err == 0)
		page_endio(page, rw, 0);
	return err;
}
static void zram_reset_device(struct zram *zram)
{
	struct zram_meta *meta;
	struct zcomp *comp;
	u64 disksize;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	comp = zram->comp;
	disksize = zram->disksize;
	/*
	 * The refcount will eventually drop to 0; the r/w handlers
	 * cannot accept further I/O and will bail out on the
	 * zram_meta_get() check.
	 */
	zram_meta_put(zram);
	/*
	 * We want to free zram_meta in process context to avoid
	 * deadlock between reclaim path and any other locks.
	 */
	wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->max_comp_streams = 1;

	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);

	up_write(&zram->init_lock);
	/* All I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(meta, disksize);
	zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(zram->disk->first_minor, disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	init_waitqueue_head(&zram->io_done);
	atomic_set(&zram->refcount, 1);
	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta, disksize);
	return err;
}
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);
	if (!bdev)
		return -ENOMEM;

	mutex_lock(&bdev->bd_mutex);
	/* Do not reset an active device! */
	if (bdev->bd_openers) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	zram_reset_device(zram);

	mutex_unlock(&bdev->bd_mutex);
	revalidate_disk(zram->disk);
	bdput(bdev);

	return len;
out:
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	return ret;
}
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.rw_page = zram_rw_page,
	.owner = THIS_MODULE
};
);
1118 static DEVICE_ATTR_RW(disksize
);
1119 static DEVICE_ATTR_RO(initstate
);
1120 static DEVICE_ATTR_WO(reset
);
1121 static DEVICE_ATTR_RO(orig_data_size
);
1122 static DEVICE_ATTR_RO(mem_used_total
);
1123 static DEVICE_ATTR_RW(mem_limit
);
1124 static DEVICE_ATTR_RW(mem_used_max
);
1125 static DEVICE_ATTR_RW(max_comp_streams
);
1126 static DEVICE_ATTR_RW(comp_algorithm
);
static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_compact.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};
/*
 * Allocate and initialize new zram device. The function returns
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct zram *zram;
	struct request_queue *queue;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);

	queue = blk_alloc_queue(GFP_KERNEL);
	if (!queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_idr;
	}

	blk_queue_make_request(queue, zram_make_request);

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = queue;
	zram->disk->queue->queuedata = zram;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size. So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;

	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(queue);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}
static void zram_remove(struct zram *zram)
{
	pr_info("Removed device: %s\n", zram->disk->disk_name);
	/*
	 * Remove sysfs first, so no one will perform a disksize
	 * store while we destroy the devices.
	 */
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	zram_reset_device(zram);
	idr_remove(&zram_index_idr, zram->disk->first_minor);
	blk_cleanup_queue(zram->disk->queue);
	del_gendisk(zram->disk);
	put_disk(zram->disk);
	kfree(zram);
}
static int zram_remove_cb(int id, void *ptr, void *data)
{
	zram_remove(ptr);
	return 0;
}

static void destroy_devices(void)
{
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
}
static int __init zram_init(void)
{
	int ret;

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		return -EBUSY;
	}

	while (num_devices != 0) {
		ret = zram_add();
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
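
/*
 * Typical userspace setup (illustrative, mirroring the zram documentation
 * this file refers to; the device name and sizes are examples):
 *
 *	modprobe zram num_devices=1
 *	echo lzo > /sys/block/zram0/comp_algorithm
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 *
 * Writing 1 to /sys/block/zram0/reset (with the device no longer in use)
 * tears the device back down via reset_store() above.
 */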