/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
unsigned int zram_num_devices;

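/*
 * Helpers for the per-device statistics. The 64-bit counters take
 * stat64_lock so that their read-modify-write updates do not race.
 */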
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

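/* Test/set/clear a per-page flag in the zram->table entry for @index. */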
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

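/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */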
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

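/*
 * Apply a default disksize (default_disksize_perc_ram percent of RAM)
 * when none was given, warn when the chosen size exceeds twice the RAM,
 * and round the result down to a page boundary.
 */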
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use the disksize sysfs "
		"node (/sys/block/zram<id>/disksize) to specify size.\n"
		"Using default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}

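/*
 * Release the memory backing table entry @index and adjust the stats.
 * Zero-filled pages have no backing memory, so only their flag is
 * cleared; uncompressed pages are freed whole, compressed objects are
 * returned to the xvmalloc pool.
 */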
static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

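/* Serve a read of a zero-filled page by zeroing the requested range. */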
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

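/* Serve a read of a page that was stored verbatim (incompressible). */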
static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

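/*
 * Read the data behind one bio vector. A partial (sub-page) read
 * decompresses into a temporary buffer first and copies out just the
 * requested slice.
 */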
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u\n",
			(ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				xv_get_object_size(cmem) - sizeof(*zheader),
				uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
			bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}

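/*
 * Decompress the whole page at @index into @mem so that a partial write
 * can be merged into it. Absent and zero pages read back as zeroes.
 */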
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].page) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
		zram->table[index].offset;

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				xv_get_object_size(cmem) - sizeof(*zheader),
				mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

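/*
 * Compress and store the data for one bio vector. Zero-filled pages
 * are only flagged; pages that compress to more than max_zpage_size
 * are stored verbatim in a separately allocated page.
 */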
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret;
	u32 store_offset;
	size_t clen;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].page ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page, KM_USER0);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem, KM_USER0);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
				zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors, which would have the side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		store_offset = 0;
		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		zram->table[index].page = page_store;
		src = kmap_atomic(page, KM_USER0);
		goto memstore;
	}

	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
			&zram->table[index].page, &store_offset,
			GFP_NOIO | __GFP_HIGHMEM)) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}

memstore:
	zram->table[index].offset = store_offset;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

#if 0
	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}
#endif

	memcpy(cmem, src, clen);

	kunmap_atomic(cmem, KM_USER1);
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
		kunmap_atomic(src, KM_USER0);

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

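/*
 * Dispatch one bio vector under zram->lock: shared for reads,
 * exclusive for writes.
 */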
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

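/* Advance the (index, offset) pair past the range covered by @bvec. */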
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else {
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) < 0)
				goto out;
		}

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}

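/*
 * Free all buffers and stored pages of @zram and clear its stats.
 * Callers must hold init_lock for writing.
 */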
void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

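/*
 * Allocate the compressor workspace, the page table and the xvmalloc
 * pool. Called lazily from the first I/O request, under init_lock.
 */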
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

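/*
 * Swap layer hook: a swap slot on this device was freed, so the stored
 * page can be dropped immediately.
 */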
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

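/*
 * Allocate and register the request queue, gendisk and sysfs attribute
 * group for one zram device.
 */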
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

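/* Tear down the sysfs group, gendisk and request queue of one device. */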
static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (zram_num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				zram_num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!zram_num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		zram_num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", zram_num_devices);
	zram_devices = kzalloc(zram_num_devices * sizeof(struct zram),
				GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < zram_num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < zram_num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(zram_num_devices, uint, 0);
MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");