[mirror_ubuntu-zesty-kernel.git] drivers/block/zram/zram_drv.c (blob at commit "zram: report maximum used memory")
1 /*
2 * Compressed RAM block device
3 *
4 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
5 * 2012, 2013 Minchan Kim
6 *
7 * This code is released using a dual license strategy: BSD/GPL
8 * You can choose the licence that better fits your requirements.
9 *
10 * Released under the terms of 3-clause BSD License
11 * Released under the terms of GNU General Public License Version 2.0
12 *
13 */
14
15 #define KMSG_COMPONENT "zram"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18 #ifdef CONFIG_ZRAM_DEBUG
19 #define DEBUG
20 #endif
21
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/bio.h>
25 #include <linux/bitops.h>
26 #include <linux/blkdev.h>
27 #include <linux/buffer_head.h>
28 #include <linux/device.h>
29 #include <linux/genhd.h>
30 #include <linux/highmem.h>
31 #include <linux/slab.h>
32 #include <linux/string.h>
33 #include <linux/vmalloc.h>
34 #include <linux/err.h>
35
36 #include "zram_drv.h"
37
38 /* Globals */
39 static int zram_major;
40 static struct zram *zram_devices;
41 static const char *default_compressor = "lzo";
42
43 /* Module params (documentation at end) */
44 static unsigned int num_devices = 1;
45
46 #define ZRAM_ATTR_RO(name) \
47 static ssize_t zram_attr_##name##_show(struct device *d, \
48 struct device_attribute *attr, char *b) \
49 { \
50 struct zram *zram = dev_to_zram(d); \
51 return scnprintf(b, PAGE_SIZE, "%llu\n", \
52 (u64)atomic64_read(&zram->stats.name)); \
53 } \
54 static struct device_attribute dev_attr_##name = \
55 __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
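/*
 * For illustration: ZRAM_ATTR_RO(num_reads), used further down, expands to
 * a read-only sysfs show handler plus its device_attribute, roughly:
 *
 *   static ssize_t zram_attr_num_reads_show(struct device *d,
 *                   struct device_attribute *attr, char *b)
 *   {
 *           struct zram *zram = dev_to_zram(d);
 *           return scnprintf(b, PAGE_SIZE, "%llu\n",
 *                   (u64)atomic64_read(&zram->stats.num_reads));
 *   }
 *   static struct device_attribute dev_attr_num_reads =
 *           __ATTR(num_reads, S_IRUGO, zram_attr_num_reads_show, NULL);
 */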
56
57 static inline int init_done(struct zram *zram)
58 {
59 return zram->meta != NULL;
60 }
61
62 static inline struct zram *dev_to_zram(struct device *dev)
63 {
64 return (struct zram *)dev_to_disk(dev)->private_data;
65 }
66
67 static ssize_t disksize_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69 {
70 struct zram *zram = dev_to_zram(dev);
71
72 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
73 }
74
75 static ssize_t initstate_show(struct device *dev,
76 struct device_attribute *attr, char *buf)
77 {
78 u32 val;
79 struct zram *zram = dev_to_zram(dev);
80
81 down_read(&zram->init_lock);
82 val = init_done(zram);
83 up_read(&zram->init_lock);
84
85 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
86 }
87
88 static ssize_t orig_data_size_show(struct device *dev,
89 struct device_attribute *attr, char *buf)
90 {
91 struct zram *zram = dev_to_zram(dev);
92
93 return scnprintf(buf, PAGE_SIZE, "%llu\n",
94 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
95 }
96
97 static ssize_t mem_used_total_show(struct device *dev,
98 struct device_attribute *attr, char *buf)
99 {
100 u64 val = 0;
101 struct zram *zram = dev_to_zram(dev);
102 struct zram_meta *meta = zram->meta;
103
104 down_read(&zram->init_lock);
105 if (init_done(zram))
106 val = zs_get_total_pages(meta->mem_pool);
107 up_read(&zram->init_lock);
108
109 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
110 }
111
112 static ssize_t max_comp_streams_show(struct device *dev,
113 struct device_attribute *attr, char *buf)
114 {
115 int val;
116 struct zram *zram = dev_to_zram(dev);
117
118 down_read(&zram->init_lock);
119 val = zram->max_comp_streams;
120 up_read(&zram->init_lock);
121
122 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
123 }
124
125 static ssize_t mem_limit_show(struct device *dev,
126 struct device_attribute *attr, char *buf)
127 {
128 u64 val;
129 struct zram *zram = dev_to_zram(dev);
130
131 down_read(&zram->init_lock);
132 val = zram->limit_pages;
133 up_read(&zram->init_lock);
134
135 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
136 }
137
138 static ssize_t mem_limit_store(struct device *dev,
139 struct device_attribute *attr, const char *buf, size_t len)
140 {
141 u64 limit;
142 char *tmp;
143 struct zram *zram = dev_to_zram(dev);
144
145 limit = memparse(buf, &tmp);
146 if (buf == tmp) /* no chars parsed, invalid input */
147 return -EINVAL;
148
149 down_write(&zram->init_lock);
150 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
151 up_write(&zram->init_lock);
152
153 return len;
154 }
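/*
 * Usage sketch (assuming a zram0 device): mem_limit accepts memparse()
 * suffixes, e.g. "echo 256M > /sys/block/zram0/mem_limit" caps the zsmalloc
 * pool at 256 MiB (rounded up to whole pages). Writing 0 sets limit_pages
 * to 0, which disables the limit check in zram_bvec_write().
 */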
155
156 static ssize_t mem_used_max_show(struct device *dev,
157 struct device_attribute *attr, char *buf)
158 {
159 u64 val = 0;
160 struct zram *zram = dev_to_zram(dev);
161
162 down_read(&zram->init_lock);
163 if (init_done(zram))
164 val = atomic_long_read(&zram->stats.max_used_pages);
165 up_read(&zram->init_lock);
166
167 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
168 }
169
170 static ssize_t mem_used_max_store(struct device *dev,
171 struct device_attribute *attr, const char *buf, size_t len)
172 {
173 int err;
174 unsigned long val;
175 struct zram *zram = dev_to_zram(dev);
176 struct zram_meta *meta = zram->meta;
177
178 err = kstrtoul(buf, 10, &val);
179 if (err || val != 0)
180 return -EINVAL;
181
182 down_read(&zram->init_lock);
183 if (init_done(zram))
184 atomic_long_set(&zram->stats.max_used_pages,
185 zs_get_total_pages(meta->mem_pool));
186 up_read(&zram->init_lock);
187
188 return len;
189 }
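/*
 * Usage sketch (assuming a zram0 device): only "0" is accepted here, so
 * "echo 0 > /sys/block/zram0/mem_used_max" resets the recorded peak to the
 * pool's current size; any other value returns -EINVAL.
 */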
190
191 static ssize_t max_comp_streams_store(struct device *dev,
192 struct device_attribute *attr, const char *buf, size_t len)
193 {
194 int num;
195 struct zram *zram = dev_to_zram(dev);
196 int ret;
197
198 ret = kstrtoint(buf, 0, &num);
199 if (ret < 0)
200 return ret;
201 if (num < 1)
202 return -EINVAL;
203
204 down_write(&zram->init_lock);
205 if (init_done(zram)) {
206 if (!zcomp_set_max_streams(zram->comp, num)) {
207 pr_info("Cannot change max compression streams\n");
208 ret = -EINVAL;
209 goto out;
210 }
211 }
212
213 zram->max_comp_streams = num;
214 ret = len;
215 out:
216 up_write(&zram->init_lock);
217 return ret;
218 }
219
220 static ssize_t comp_algorithm_show(struct device *dev,
221 struct device_attribute *attr, char *buf)
222 {
223 size_t sz;
224 struct zram *zram = dev_to_zram(dev);
225
226 down_read(&zram->init_lock);
227 sz = zcomp_available_show(zram->compressor, buf);
228 up_read(&zram->init_lock);
229
230 return sz;
231 }
232
233 static ssize_t comp_algorithm_store(struct device *dev,
234 struct device_attribute *attr, const char *buf, size_t len)
235 {
236 struct zram *zram = dev_to_zram(dev);
237 down_write(&zram->init_lock);
238 if (init_done(zram)) {
239 up_write(&zram->init_lock);
240 pr_info("Can't change algorithm for initialized device\n");
241 return -EBUSY;
242 }
243 strlcpy(zram->compressor, buf, sizeof(zram->compressor));
244 up_write(&zram->init_lock);
245 return len;
246 }
247
248 /* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) held */
249 static int zram_test_flag(struct zram_meta *meta, u32 index,
250 enum zram_pageflags flag)
251 {
252 return meta->table[index].value & BIT(flag);
253 }
254
255 static void zram_set_flag(struct zram_meta *meta, u32 index,
256 enum zram_pageflags flag)
257 {
258 meta->table[index].value |= BIT(flag);
259 }
260
261 static void zram_clear_flag(struct zram_meta *meta, u32 index,
262 enum zram_pageflags flag)
263 {
264 meta->table[index].value &= ~BIT(flag);
265 }
266
267 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
268 {
269 return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
270 }
271
272 static void zram_set_obj_size(struct zram_meta *meta,
273 u32 index, size_t size)
274 {
275 unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
276
277 meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
278 }
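/*
 * Layout of table[index].value as implied by the helpers above: the low
 * ZRAM_FLAG_SHIFT bits hold the compressed object size, and the bits from
 * ZRAM_FLAG_SHIFT upwards hold zram_pageflags bits (ZRAM_ZERO, plus the
 * ZRAM_ACCESS bit that is used as a per-entry bit_spinlock below).
 */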
279
280 static inline int is_partial_io(struct bio_vec *bvec)
281 {
282 return bvec->bv_len != PAGE_SIZE;
283 }
284
285 /*
286 * Check if request is within bounds and aligned on zram logical blocks.
287 */
288 static inline int valid_io_request(struct zram *zram, struct bio *bio)
289 {
290 u64 start, end, bound;
291
292 /* unaligned request */
293 if (unlikely(bio->bi_iter.bi_sector &
294 (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
295 return 0;
296 if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
297 return 0;
298
299 start = bio->bi_iter.bi_sector;
300 end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
301 bound = zram->disksize >> SECTOR_SHIFT;
302 /* out of range */
303 if (unlikely(start >= bound || end > bound || start > end))
304 return 0;
305
306 /* I/O request is valid */
307 return 1;
308 }
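/*
 * Example (assuming the driver's 4 KiB logical block, i.e. 8 sectors): a
 * bio starting at sector 3, or one whose size isn't a multiple of 4 KiB,
 * is rejected by the alignment checks above; a 4 KiB bio at sector 8 on a
 * 16 KiB disksize (bound = 32) passes, since start = 8 and end = 16.
 */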
309
310 static void zram_meta_free(struct zram_meta *meta)
311 {
312 zs_destroy_pool(meta->mem_pool);
313 vfree(meta->table);
314 kfree(meta);
315 }
316
317 static struct zram_meta *zram_meta_alloc(u64 disksize)
318 {
319 size_t num_pages;
320 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
321 if (!meta)
322 goto out;
323
324 num_pages = disksize >> PAGE_SHIFT;
325 meta->table = vzalloc(num_pages * sizeof(*meta->table));
326 if (!meta->table) {
327 pr_err("Error allocating zram address table\n");
328 goto free_meta;
329 }
330
331 meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
332 if (!meta->mem_pool) {
333 pr_err("Error creating memory pool\n");
334 goto free_table;
335 }
336
337 return meta;
338
339 free_table:
340 vfree(meta->table);
341 free_meta:
342 kfree(meta);
343 meta = NULL;
344 out:
345 return meta;
346 }
347
348 static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
349 {
350 if (*offset + bvec->bv_len >= PAGE_SIZE)
351 (*index)++;
352 *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
353 }
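/*
 * Example: a 1 KiB bvec at offset 3 KiB reaches the page boundary, so the
 * index advances by one and the offset wraps back to 0 for the next bvec.
 */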
354
355 static int page_zero_filled(void *ptr)
356 {
357 unsigned int pos;
358 unsigned long *page;
359
360 page = (unsigned long *)ptr;
361
362 for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
363 if (page[pos])
364 return 0;
365 }
366
367 return 1;
368 }
369
370 static void handle_zero_page(struct bio_vec *bvec)
371 {
372 struct page *page = bvec->bv_page;
373 void *user_mem;
374
375 user_mem = kmap_atomic(page);
376 if (is_partial_io(bvec))
377 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
378 else
379 clear_page(user_mem);
380 kunmap_atomic(user_mem);
381
382 flush_dcache_page(page);
383 }
384
385
386 /*
387 * To protect concurrent access to the same index entry,
388 * the caller should hold this table index entry's bit_spinlock to
389 * indicate that this index entry is being accessed.
390 */
391 static void zram_free_page(struct zram *zram, size_t index)
392 {
393 struct zram_meta *meta = zram->meta;
394 unsigned long handle = meta->table[index].handle;
395
396 if (unlikely(!handle)) {
397 /*
398 * No memory is allocated for zero filled pages.
399 * Simply clear zero page flag.
400 */
401 if (zram_test_flag(meta, index, ZRAM_ZERO)) {
402 zram_clear_flag(meta, index, ZRAM_ZERO);
403 atomic64_dec(&zram->stats.zero_pages);
404 }
405 return;
406 }
407
408 zs_free(meta->mem_pool, handle);
409
410 atomic64_sub(zram_get_obj_size(meta, index),
411 &zram->stats.compr_data_size);
412 atomic64_dec(&zram->stats.pages_stored);
413
414 meta->table[index].handle = 0;
415 zram_set_obj_size(meta, index, 0);
416 }
417
418 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
419 {
420 int ret = 0;
421 unsigned char *cmem;
422 struct zram_meta *meta = zram->meta;
423 unsigned long handle;
424 size_t size;
425
426 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
427 handle = meta->table[index].handle;
428 size = zram_get_obj_size(meta, index);
429
430 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
431 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
432 clear_page(mem);
433 return 0;
434 }
435
436 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
437 if (size == PAGE_SIZE)
438 copy_page(mem, cmem);
439 else
440 ret = zcomp_decompress(zram->comp, cmem, size, mem);
441 zs_unmap_object(meta->mem_pool, handle);
442 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
443
444 /* Should NEVER happen. Return bio error if it does. */
445 if (unlikely(ret)) {
446 pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
447 return ret;
448 }
449
450 return 0;
451 }
452
453 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
454 u32 index, int offset, struct bio *bio)
455 {
456 int ret;
457 struct page *page;
458 unsigned char *user_mem, *uncmem = NULL;
459 struct zram_meta *meta = zram->meta;
460 page = bvec->bv_page;
461
462 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
463 if (unlikely(!meta->table[index].handle) ||
464 zram_test_flag(meta, index, ZRAM_ZERO)) {
465 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
466 handle_zero_page(bvec);
467 return 0;
468 }
469 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
470
471 if (is_partial_io(bvec))
472 /* Use a temporary buffer to decompress the page */
473 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
474
475 user_mem = kmap_atomic(page);
476 if (!is_partial_io(bvec))
477 uncmem = user_mem;
478
479 if (!uncmem) {
480 pr_info("Unable to allocate temp memory\n");
481 ret = -ENOMEM;
482 goto out_cleanup;
483 }
484
485 ret = zram_decompress_page(zram, uncmem, index);
486 /* Should NEVER happen. Return bio error if it does. */
487 if (unlikely(ret))
488 goto out_cleanup;
489
490 if (is_partial_io(bvec))
491 memcpy(user_mem + bvec->bv_offset, uncmem + offset,
492 bvec->bv_len);
493
494 flush_dcache_page(page);
495 ret = 0;
496 out_cleanup:
497 kunmap_atomic(user_mem);
498 if (is_partial_io(bvec))
499 kfree(uncmem);
500 return ret;
501 }
502
503 static inline void update_used_max(struct zram *zram,
504 const unsigned long pages)
505 {
506 int old_max, cur_max;
507
508 old_max = atomic_long_read(&zram->stats.max_used_pages);
509
510 do {
511 cur_max = old_max;
512 if (pages > cur_max)
513 old_max = atomic_long_cmpxchg(
514 &zram->stats.max_used_pages, cur_max, pages);
515 } while (old_max != cur_max);
516 }
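/*
 * Note on the loop above: atomic_long_cmpxchg() returns the value that was
 * actually in max_used_pages. If another writer raced in, that return value
 * differs from cur_max and the loop retries with the fresh value, so the
 * maximum is maintained without taking a lock.
 */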
517
518 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
519 int offset)
520 {
521 int ret = 0;
522 size_t clen;
523 unsigned long handle;
524 struct page *page;
525 unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
526 struct zram_meta *meta = zram->meta;
527 struct zcomp_strm *zstrm;
528 bool locked = false;
529 unsigned long alloced_pages;
530
531 page = bvec->bv_page;
532 if (is_partial_io(bvec)) {
533 /*
534 * This is a partial IO. We need to read the full page
535 * before writing the changes.
536 */
537 uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
538 if (!uncmem) {
539 ret = -ENOMEM;
540 goto out;
541 }
542 ret = zram_decompress_page(zram, uncmem, index);
543 if (ret)
544 goto out;
545 }
546
547 zstrm = zcomp_strm_find(zram->comp);
548 locked = true;
549 user_mem = kmap_atomic(page);
550
551 if (is_partial_io(bvec)) {
552 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
553 bvec->bv_len);
554 kunmap_atomic(user_mem);
555 user_mem = NULL;
556 } else {
557 uncmem = user_mem;
558 }
559
560 if (page_zero_filled(uncmem)) {
561 kunmap_atomic(user_mem);
562 /* Free memory associated with this sector now. */
563 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
564 zram_free_page(zram, index);
565 zram_set_flag(meta, index, ZRAM_ZERO);
566 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
567
568 atomic64_inc(&zram->stats.zero_pages);
569 ret = 0;
570 goto out;
571 }
572
573 ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
574 if (!is_partial_io(bvec)) {
575 kunmap_atomic(user_mem);
576 user_mem = NULL;
577 uncmem = NULL;
578 }
579
580 if (unlikely(ret)) {
581 pr_err("Compression failed! err=%d\n", ret);
582 goto out;
583 }
584 src = zstrm->buffer;
585 if (unlikely(clen > max_zpage_size)) {
586 clen = PAGE_SIZE;
587 if (is_partial_io(bvec))
588 src = uncmem;
589 }
590
591 handle = zs_malloc(meta->mem_pool, clen);
592 if (!handle) {
593 pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
594 index, clen);
595 ret = -ENOMEM;
596 goto out;
597 }
598
599 alloced_pages = zs_get_total_pages(meta->mem_pool);
600 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
601 zs_free(meta->mem_pool, handle);
602 ret = -ENOMEM;
603 goto out;
604 }
605
606 update_used_max(zram, alloced_pages);
607
608 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
609
610 if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
611 src = kmap_atomic(page);
612 copy_page(cmem, src);
613 kunmap_atomic(src);
614 } else {
615 memcpy(cmem, src, clen);
616 }
617
618 zcomp_strm_release(zram->comp, zstrm);
619 locked = false;
620 zs_unmap_object(meta->mem_pool, handle);
621
622 /*
623 * Free memory associated with this sector
624 * before overwriting unused sectors.
625 */
626 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
627 zram_free_page(zram, index);
628
629 meta->table[index].handle = handle;
630 zram_set_obj_size(meta, index, clen);
631 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
632
633 /* Update stats */
634 atomic64_add(clen, &zram->stats.compr_data_size);
635 atomic64_inc(&zram->stats.pages_stored);
636 out:
637 if (locked)
638 zcomp_strm_release(zram->comp, zstrm);
639 if (is_partial_io(bvec))
640 kfree(uncmem);
641 return ret;
642 }
643
644 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
645 int offset, struct bio *bio)
646 {
647 int ret;
648 int rw = bio_data_dir(bio);
649
650 if (rw == READ) {
651 atomic64_inc(&zram->stats.num_reads);
652 ret = zram_bvec_read(zram, bvec, index, offset, bio);
653 } else {
654 atomic64_inc(&zram->stats.num_writes);
655 ret = zram_bvec_write(zram, bvec, index, offset);
656 }
657
658 if (unlikely(ret)) {
659 if (rw == READ)
660 atomic64_inc(&zram->stats.failed_reads);
661 else
662 atomic64_inc(&zram->stats.failed_writes);
663 }
664
665 return ret;
666 }
667
668 /*
669 * zram_bio_discard - handler for discard requests
670 * @index: physical block index in PAGE_SIZE units
671 * @offset: byte offset within physical block
672 */
673 static void zram_bio_discard(struct zram *zram, u32 index,
674 int offset, struct bio *bio)
675 {
676 size_t n = bio->bi_iter.bi_size;
677 struct zram_meta *meta = zram->meta;
678
679 /*
680 * zram manages data in physical block size units. Because logical block
681 * size isn't identical to the physical block size on some architectures, we
682 * could get a discard request pointing to a specific offset within a
683 * certain physical block. Although we can handle this request by
684 * reading that physical block and decompressing and partially zeroing
685 * and re-compressing and then re-storing it, this isn't reasonable
686 * because our intent with a discard request is to save memory. So
687 * skipping this logical block is appropriate here.
688 */
689 if (offset) {
690 if (n <= (PAGE_SIZE - offset))
691 return;
692
693 n -= (PAGE_SIZE - offset);
694 index++;
695 }
696
697 while (n >= PAGE_SIZE) {
698 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
699 zram_free_page(zram, index);
700 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
701 index++;
702 n -= PAGE_SIZE;
703 }
704 }
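/*
 * Worked example: a 12 KiB discard starting 2 KiB into a 4 KiB page
 * (offset = 2048) skips the first partial page, leaving 10 KiB from the
 * next index; the two fully covered pages are freed and the trailing 2 KiB
 * remainder is left alone, as the comment above argues it should be.
 */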
705
706 static void zram_reset_device(struct zram *zram, bool reset_capacity)
707 {
708 size_t index;
709 struct zram_meta *meta;
710
711 down_write(&zram->init_lock);
712
713 zram->limit_pages = 0;
714
715 if (!init_done(zram)) {
716 up_write(&zram->init_lock);
717 return;
718 }
719
720 meta = zram->meta;
721 /* Free all pages that are still in this zram device */
722 for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
723 unsigned long handle = meta->table[index].handle;
724 if (!handle)
725 continue;
726
727 zs_free(meta->mem_pool, handle);
728 }
729
730 zcomp_destroy(zram->comp);
731 zram->max_comp_streams = 1;
732
733 zram_meta_free(zram->meta);
734 zram->meta = NULL;
735 /* Reset stats */
736 memset(&zram->stats, 0, sizeof(zram->stats));
737
738 zram->disksize = 0;
739 if (reset_capacity)
740 set_capacity(zram->disk, 0);
741
742 up_write(&zram->init_lock);
743
744 /*
745 * Revalidate disk out of the init_lock to avoid lockdep splat.
746 * It's okay because disk's capacity is protected by init_lock
747 * so that revalidate_disk always sees up-to-date capacity.
748 */
749 if (reset_capacity)
750 revalidate_disk(zram->disk);
751 }
752
753 static ssize_t disksize_store(struct device *dev,
754 struct device_attribute *attr, const char *buf, size_t len)
755 {
756 u64 disksize;
757 struct zcomp *comp;
758 struct zram_meta *meta;
759 struct zram *zram = dev_to_zram(dev);
760 int err;
761
762 disksize = memparse(buf, NULL);
763 if (!disksize)
764 return -EINVAL;
765
766 disksize = PAGE_ALIGN(disksize);
767 meta = zram_meta_alloc(disksize);
768 if (!meta)
769 return -ENOMEM;
770
771 comp = zcomp_create(zram->compressor, zram->max_comp_streams);
772 if (IS_ERR(comp)) {
773 pr_info("Cannot initialise %s compressing backend\n",
774 zram->compressor);
775 err = PTR_ERR(comp);
776 goto out_free_meta;
777 }
778
779 down_write(&zram->init_lock);
780 if (init_done(zram)) {
781 pr_info("Cannot change disksize for initialized device\n");
782 err = -EBUSY;
783 goto out_destroy_comp;
784 }
785
786 zram->meta = meta;
787 zram->comp = comp;
788 zram->disksize = disksize;
789 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
790 up_write(&zram->init_lock);
791
792 /*
793 * Revalidate disk out of the init_lock to avoid lockdep splat.
794 * It's okay because disk's capacity is protected by init_lock
795 * so that revalidate_disk always sees up-to-date capacity.
796 */
797 revalidate_disk(zram->disk);
798
799 return len;
800
801 out_destroy_comp:
802 up_write(&zram->init_lock);
803 zcomp_destroy(comp);
804 out_free_meta:
805 zram_meta_free(meta);
806 return err;
807 }
808
809 static ssize_t reset_store(struct device *dev,
810 struct device_attribute *attr, const char *buf, size_t len)
811 {
812 int ret;
813 unsigned short do_reset;
814 struct zram *zram;
815 struct block_device *bdev;
816
817 zram = dev_to_zram(dev);
818 bdev = bdget_disk(zram->disk, 0);
819
820 if (!bdev)
821 return -ENOMEM;
822
823 /* Do not reset an active device! */
824 if (bdev->bd_holders) {
825 ret = -EBUSY;
826 goto out;
827 }
828
829 ret = kstrtou16(buf, 10, &do_reset);
830 if (ret)
831 goto out;
832
833 if (!do_reset) {
834 ret = -EINVAL;
835 goto out;
836 }
837
838 /* Make sure all pending I/O is finished */
839 fsync_bdev(bdev);
840 bdput(bdev);
841
842 zram_reset_device(zram, true);
843 return len;
844
845 out:
846 bdput(bdev);
847 return ret;
848 }
849
850 static void __zram_make_request(struct zram *zram, struct bio *bio)
851 {
852 int offset;
853 u32 index;
854 struct bio_vec bvec;
855 struct bvec_iter iter;
856
857 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
858 offset = (bio->bi_iter.bi_sector &
859 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
860
861 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
862 zram_bio_discard(zram, index, offset, bio);
863 bio_endio(bio, 0);
864 return;
865 }
866
867 bio_for_each_segment(bvec, bio, iter) {
868 int max_transfer_size = PAGE_SIZE - offset;
869
870 if (bvec.bv_len > max_transfer_size) {
871 /*
872 * zram_bvec_rw() can only operate on a single
873 * zram page. Split the bio vector.
874 */
875 struct bio_vec bv;
876
877 bv.bv_page = bvec.bv_page;
878 bv.bv_len = max_transfer_size;
879 bv.bv_offset = bvec.bv_offset;
880
881 if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
882 goto out;
883
884 bv.bv_len = bvec.bv_len - max_transfer_size;
885 bv.bv_offset += max_transfer_size;
886 if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
887 goto out;
888 } else
889 if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
890 goto out;
891
892 update_position(&index, &offset, &bvec);
893 }
894
895 set_bit(BIO_UPTODATE, &bio->bi_flags);
896 bio_endio(bio, 0);
897 return;
898
899 out:
900 bio_io_error(bio);
901 }
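/*
 * Example of the split above (4 KiB pages): a 4 KiB bvec that lands at
 * offset = 1 KiB within a zram page has max_transfer_size = 3 KiB, so it is
 * handled as a 3 KiB piece at (index, 1 KiB) followed by a 1 KiB piece at
 * (index + 1, 0).
 */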
902
903 /*
904 * Handler function for all zram I/O requests.
905 */
906 static void zram_make_request(struct request_queue *queue, struct bio *bio)
907 {
908 struct zram *zram = queue->queuedata;
909
910 down_read(&zram->init_lock);
911 if (unlikely(!init_done(zram)))
912 goto error;
913
914 if (!valid_io_request(zram, bio)) {
915 atomic64_inc(&zram->stats.invalid_io);
916 goto error;
917 }
918
919 __zram_make_request(zram, bio);
920 up_read(&zram->init_lock);
921
922 return;
923
924 error:
925 up_read(&zram->init_lock);
926 bio_io_error(bio);
927 }
928
929 static void zram_slot_free_notify(struct block_device *bdev,
930 unsigned long index)
931 {
932 struct zram *zram;
933 struct zram_meta *meta;
934
935 zram = bdev->bd_disk->private_data;
936 meta = zram->meta;
937
938 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
939 zram_free_page(zram, index);
940 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
941 atomic64_inc(&zram->stats.notify_free);
942 }
943
944 static const struct block_device_operations zram_devops = {
945 .swap_slot_free_notify = zram_slot_free_notify,
946 .owner = THIS_MODULE
947 };
948
949 static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
950 disksize_show, disksize_store);
951 static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
952 static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
953 static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
954 static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
955 static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
956 mem_limit_store);
957 static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
958 mem_used_max_store);
959 static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
960 max_comp_streams_show, max_comp_streams_store);
961 static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
962 comp_algorithm_show, comp_algorithm_store);
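/*
 * Typical setup through these attributes (a sketch, assuming a zram0
 * device; comp_algorithm must be chosen before disksize, since its store
 * method returns -EBUSY once the device is initialized):
 *
 *   echo lzo > /sys/block/zram0/comp_algorithm
 *   echo 2 > /sys/block/zram0/max_comp_streams
 *   echo 1G > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0    # e.g. use as swap
 *   ...
 *   swapoff /dev/zram0
 *   echo 1 > /sys/block/zram0/reset
 */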
963
964 ZRAM_ATTR_RO(num_reads);
965 ZRAM_ATTR_RO(num_writes);
966 ZRAM_ATTR_RO(failed_reads);
967 ZRAM_ATTR_RO(failed_writes);
968 ZRAM_ATTR_RO(invalid_io);
969 ZRAM_ATTR_RO(notify_free);
970 ZRAM_ATTR_RO(zero_pages);
971 ZRAM_ATTR_RO(compr_data_size);
972
973 static struct attribute *zram_disk_attrs[] = {
974 &dev_attr_disksize.attr,
975 &dev_attr_initstate.attr,
976 &dev_attr_reset.attr,
977 &dev_attr_num_reads.attr,
978 &dev_attr_num_writes.attr,
979 &dev_attr_failed_reads.attr,
980 &dev_attr_failed_writes.attr,
981 &dev_attr_invalid_io.attr,
982 &dev_attr_notify_free.attr,
983 &dev_attr_zero_pages.attr,
984 &dev_attr_orig_data_size.attr,
985 &dev_attr_compr_data_size.attr,
986 &dev_attr_mem_used_total.attr,
987 &dev_attr_mem_limit.attr,
988 &dev_attr_mem_used_max.attr,
989 &dev_attr_max_comp_streams.attr,
990 &dev_attr_comp_algorithm.attr,
991 NULL,
992 };
993
994 static struct attribute_group zram_disk_attr_group = {
995 .attrs = zram_disk_attrs,
996 };
997
998 static int create_device(struct zram *zram, int device_id)
999 {
1000 int ret = -ENOMEM;
1001
1002 init_rwsem(&zram->init_lock);
1003
1004 zram->queue = blk_alloc_queue(GFP_KERNEL);
1005 if (!zram->queue) {
1006 pr_err("Error allocating disk queue for device %d\n",
1007 device_id);
1008 goto out;
1009 }
1010
1011 blk_queue_make_request(zram->queue, zram_make_request);
1012 zram->queue->queuedata = zram;
1013
1014 /* gendisk structure */
1015 zram->disk = alloc_disk(1);
1016 if (!zram->disk) {
1017 pr_warn("Error allocating disk structure for device %d\n",
1018 device_id);
1019 goto out_free_queue;
1020 }
1021
1022 zram->disk->major = zram_major;
1023 zram->disk->first_minor = device_id;
1024 zram->disk->fops = &zram_devops;
1025 zram->disk->queue = zram->queue;
1026 zram->disk->private_data = zram;
1027 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
1028
1029 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
1030 set_capacity(zram->disk, 0);
1031 /* zram devices sort of resemble non-rotational disks */
1032 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
1033 /*
1034 * Ensure that we always get PAGE_SIZE-aligned and
1035 * n*PAGE_SIZE-sized I/O requests.
1036 */
1037 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
1038 blk_queue_logical_block_size(zram->disk->queue,
1039 ZRAM_LOGICAL_BLOCK_SIZE);
1040 blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
1041 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
1042 zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
1043 zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
1044 /*
1045 * zram_bio_discard() will clear all logical blocks if logical block
1046 * size is identical to the physical block size (PAGE_SIZE). But if it is
1047 * different, we will skip discarding some parts of logical blocks in
1048 * the part of the request range which isn't aligned to physical block
1049 * size. So we can't ensure that all discarded logical blocks are
1050 * zeroed.
1051 */
1052 if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
1053 zram->disk->queue->limits.discard_zeroes_data = 1;
1054 else
1055 zram->disk->queue->limits.discard_zeroes_data = 0;
1056 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
1057
1058 add_disk(zram->disk);
1059
1060 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
1061 &zram_disk_attr_group);
1062 if (ret < 0) {
1063 pr_warn("Error creating sysfs group");
1064 goto out_free_disk;
1065 }
1066 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
1067 zram->meta = NULL;
1068 zram->max_comp_streams = 1;
1069 return 0;
1070
1071 out_free_disk:
1072 del_gendisk(zram->disk);
1073 put_disk(zram->disk);
1074 out_free_queue:
1075 blk_cleanup_queue(zram->queue);
1076 out:
1077 return ret;
1078 }
1079
1080 static void destroy_device(struct zram *zram)
1081 {
1082 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
1083 &zram_disk_attr_group);
1084
1085 del_gendisk(zram->disk);
1086 put_disk(zram->disk);
1087
1088 blk_cleanup_queue(zram->queue);
1089 }
1090
1091 static int __init zram_init(void)
1092 {
1093 int ret, dev_id;
1094
1095 if (num_devices > max_num_devices) {
1096 pr_warn("Invalid value for num_devices: %u\n",
1097 num_devices);
1098 ret = -EINVAL;
1099 goto out;
1100 }
1101
1102 zram_major = register_blkdev(0, "zram");
1103 if (zram_major <= 0) {
1104 pr_warn("Unable to get major number\n");
1105 ret = -EBUSY;
1106 goto out;
1107 }
1108
1109 /* Allocate the device array and initialize each one */
1110 zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
1111 if (!zram_devices) {
1112 ret = -ENOMEM;
1113 goto unregister;
1114 }
1115
1116 for (dev_id = 0; dev_id < num_devices; dev_id++) {
1117 ret = create_device(&zram_devices[dev_id], dev_id);
1118 if (ret)
1119 goto free_devices;
1120 }
1121
1122 pr_info("Created %u device(s) ...\n", num_devices);
1123
1124 return 0;
1125
1126 free_devices:
1127 while (dev_id)
1128 destroy_device(&zram_devices[--dev_id]);
1129 kfree(zram_devices);
1130 unregister:
1131 unregister_blkdev(zram_major, "zram");
1132 out:
1133 return ret;
1134 }
1135
1136 static void __exit zram_exit(void)
1137 {
1138 int i;
1139 struct zram *zram;
1140
1141 for (i = 0; i < num_devices; i++) {
1142 zram = &zram_devices[i];
1143
1144 destroy_device(zram);
1145 /*
1146 * Shouldn't access zram->disk after destroy_device
1147 * because destroy_device already released zram->disk.
1148 */
1149 zram_reset_device(zram, false);
1150 }
1151
1152 unregister_blkdev(zram_major, "zram");
1153
1154 kfree(zram_devices);
1155 pr_debug("Cleanup done!\n");
1156 }
1157
1158 module_init(zram_init);
1159 module_exit(zram_exit);
1160
1161 module_param(num_devices, uint, 0);
1162 MODULE_PARM_DESC(num_devices, "Number of zram devices");
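/*
 * Load-time sketch: "modprobe zram num_devices=4" creates zram0..zram3; the
 * default is a single device, and zram_init() rejects values above
 * max_num_devices.
 */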
1163
1164 MODULE_LICENSE("Dual BSD/GPL");
1165 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
1166 MODULE_DESCRIPTION("Compressed RAM Block Device");