Commit | Line | Data |
---|---|---|
306b0c95 | 1 | /* |
f1e3cfff | 2 | * Compressed RAM block device |
306b0c95 | 3 | * |
1130ebba | 4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta |
7bfb3de8 | 5 | * 2012, 2013 Minchan Kim |
306b0c95 NG |
6 | * |
7 | * This code is released using a dual license strategy: BSD/GPL | |
8 | * You can choose the licence that better fits your requirements. | |
9 | * | |
10 | * Released under the terms of 3-clause BSD License | |
11 | * Released under the terms of GNU General Public License Version 2.0 | |
12 | * | |
306b0c95 NG |
13 | */ |
14 | ||
f1e3cfff | 15 | #define KMSG_COMPONENT "zram" |
306b0c95 NG |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | ||
18 | #include <linux/module.h> | |
19 | #include <linux/kernel.h> | |
8946a086 | 20 | #include <linux/bio.h> |
306b0c95 NG |
21 | #include <linux/bitops.h> |
22 | #include <linux/blkdev.h> | |
23 | #include <linux/buffer_head.h> | |
24 | #include <linux/device.h> | |
25 | #include <linux/genhd.h> | |
26 | #include <linux/highmem.h> | |
5a0e3ad6 | 27 | #include <linux/slab.h> |
306b0c95 | 28 | #include <linux/string.h> |
306b0c95 | 29 | #include <linux/vmalloc.h> |
fcfa8d95 | 30 | #include <linux/err.h> |
306b0c95 | 31 | |
16a4bfb9 | 32 | #include "zram_drv.h" |
306b0c95 NG |
33 | |
34 | /* Globals */ | |
f1e3cfff | 35 | static int zram_major; |
0f0e3ba3 | 36 | static struct zram *zram_devices; |
b7ca232e | 37 | static const char *default_compressor = "lzo"; |
306b0c95 | 38 | |
306b0c95 | 39 | /* Module params (documentation at end) */ |
ca3d70bd | 40 | static unsigned int num_devices = 1; |
33863c21 | 41 | |
8f7d282c SS |
42 | static inline void deprecated_attr_warn(const char *name) |
43 | { | |
44 | pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n", | |
45 | task_pid_nr(current), | |
46 | current->comm, | |
47 | name, | |
48 | "See zram documentation."); | |
49 | } | |
50 | ||
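/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs show() handler that
 * prints the 64-bit counter zram->stats.<name> and emits the
 * deprecation warning above (pr_warn_once).
 */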
a68eb3b6 | 51 | #define ZRAM_ATTR_RO(name) \ |
3bca3ef7 | 52 | static ssize_t name##_show(struct device *d, \ |
a68eb3b6 SS |
53 | struct device_attribute *attr, char *b) \ |
54 | { \ | |
55 | struct zram *zram = dev_to_zram(d); \ | |
8f7d282c SS |
56 | \ |
57 | deprecated_attr_warn(__stringify(name)); \ | |
56b4e8cb | 58 | return scnprintf(b, PAGE_SIZE, "%llu\n", \ |
a68eb3b6 SS |
59 | (u64)atomic64_read(&zram->stats.name)); \ |
60 | } \ | |
083914ea | 61 | static DEVICE_ATTR_RO(name); |
a68eb3b6 | 62 | |
08eee69f | 63 | static inline bool init_done(struct zram *zram) |
be2d1d56 | 64 | { |
08eee69f | 65 | return zram->disksize; |
be2d1d56 SS |
66 | } |
67 | ||
9b3bb7ab SS |
68 | static inline struct zram *dev_to_zram(struct device *dev) |
69 | { | |
70 | return (struct zram *)dev_to_disk(dev)->private_data; | |
71 | } | |
72 | ||
99ebbd30 AM |
73 | static ssize_t compact_store(struct device *dev, |
74 | struct device_attribute *attr, const char *buf, size_t len) | |
75 | { | |
76 | unsigned long nr_migrated; | |
77 | struct zram *zram = dev_to_zram(dev); | |
78 | struct zram_meta *meta; | |
79 | ||
80 | down_read(&zram->init_lock); | |
81 | if (!init_done(zram)) { | |
82 | up_read(&zram->init_lock); | |
83 | return -EINVAL; | |
84 | } | |
85 | ||
86 | meta = zram->meta; | |
87 | nr_migrated = zs_compact(meta->mem_pool); | |
88 | atomic64_add(nr_migrated, &zram->stats.num_migrated); | |
89 | up_read(&zram->init_lock); | |
90 | ||
91 | return len; | |
92 | } | |
93 | ||
9b3bb7ab SS |
94 | static ssize_t disksize_show(struct device *dev, |
95 | struct device_attribute *attr, char *buf) | |
96 | { | |
97 | struct zram *zram = dev_to_zram(dev); | |
98 | ||
56b4e8cb | 99 | return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); |
9b3bb7ab SS |
100 | } |
101 | ||
102 | static ssize_t initstate_show(struct device *dev, | |
103 | struct device_attribute *attr, char *buf) | |
104 | { | |
a68eb3b6 | 105 | u32 val; |
9b3bb7ab SS |
106 | struct zram *zram = dev_to_zram(dev); |
107 | ||
a68eb3b6 SS |
108 | down_read(&zram->init_lock); |
109 | val = init_done(zram); | |
110 | up_read(&zram->init_lock); | |
9b3bb7ab | 111 | |
56b4e8cb | 112 | return scnprintf(buf, PAGE_SIZE, "%u\n", val); |
9b3bb7ab SS |
113 | } |
114 | ||
115 | static ssize_t orig_data_size_show(struct device *dev, | |
116 | struct device_attribute *attr, char *buf) | |
117 | { | |
118 | struct zram *zram = dev_to_zram(dev); | |
119 | ||
8f7d282c | 120 | deprecated_attr_warn("orig_data_size"); |
56b4e8cb | 121 | return scnprintf(buf, PAGE_SIZE, "%llu\n", |
90a7806e | 122 | (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT); |
9b3bb7ab SS |
123 | } |
124 | ||
9b3bb7ab SS |
125 | static ssize_t mem_used_total_show(struct device *dev, |
126 | struct device_attribute *attr, char *buf) | |
127 | { | |
128 | u64 val = 0; | |
129 | struct zram *zram = dev_to_zram(dev); | |
9b3bb7ab | 130 | |
8f7d282c | 131 | deprecated_attr_warn("mem_used_total"); |
9b3bb7ab | 132 | down_read(&zram->init_lock); |
5a99e95b WY |
133 | if (init_done(zram)) { |
134 | struct zram_meta *meta = zram->meta; | |
722cdc17 | 135 | val = zs_get_total_pages(meta->mem_pool); |
5a99e95b | 136 | } |
9b3bb7ab SS |
137 | up_read(&zram->init_lock); |
138 | ||
722cdc17 | 139 | return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); |
9b3bb7ab SS |
140 | } |
141 | ||
beca3ec7 SS |
142 | static ssize_t max_comp_streams_show(struct device *dev, |
143 | struct device_attribute *attr, char *buf) | |
144 | { | |
145 | int val; | |
146 | struct zram *zram = dev_to_zram(dev); | |
147 | ||
148 | down_read(&zram->init_lock); | |
149 | val = zram->max_comp_streams; | |
150 | up_read(&zram->init_lock); | |
151 | ||
56b4e8cb | 152 | return scnprintf(buf, PAGE_SIZE, "%d\n", val); |
beca3ec7 SS |
153 | } |
154 | ||
9ada9da9 MK |
155 | static ssize_t mem_limit_show(struct device *dev, |
156 | struct device_attribute *attr, char *buf) | |
157 | { | |
158 | u64 val; | |
159 | struct zram *zram = dev_to_zram(dev); | |
160 | ||
8f7d282c | 161 | deprecated_attr_warn("mem_limit"); |
9ada9da9 MK |
162 | down_read(&zram->init_lock); |
163 | val = zram->limit_pages; | |
164 | up_read(&zram->init_lock); | |
165 | ||
166 | return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); | |
167 | } | |
168 | ||
169 | static ssize_t mem_limit_store(struct device *dev, | |
170 | struct device_attribute *attr, const char *buf, size_t len) | |
171 | { | |
172 | u64 limit; | |
173 | char *tmp; | |
174 | struct zram *zram = dev_to_zram(dev); | |
175 | ||
176 | limit = memparse(buf, &tmp); | |
177 | if (buf == tmp) /* no chars parsed, invalid input */ | |
178 | return -EINVAL; | |
179 | ||
180 | down_write(&zram->init_lock); | |
181 | zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; | |
182 | up_write(&zram->init_lock); | |
183 | ||
184 | return len; | |
185 | } | |
186 | ||
461a8eee MK |
187 | static ssize_t mem_used_max_show(struct device *dev, |
188 | struct device_attribute *attr, char *buf) | |
189 | { | |
190 | u64 val = 0; | |
191 | struct zram *zram = dev_to_zram(dev); | |
192 | ||
8f7d282c | 193 | deprecated_attr_warn("mem_used_max"); |
461a8eee MK |
194 | down_read(&zram->init_lock); |
195 | if (init_done(zram)) | |
196 | val = atomic_long_read(&zram->stats.max_used_pages); | |
197 | up_read(&zram->init_lock); | |
198 | ||
199 | return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); | |
200 | } | |
201 | ||
202 | static ssize_t mem_used_max_store(struct device *dev, | |
203 | struct device_attribute *attr, const char *buf, size_t len) | |
204 | { | |
205 | int err; | |
206 | unsigned long val; | |
207 | struct zram *zram = dev_to_zram(dev); | |
461a8eee MK |
208 | |
209 | err = kstrtoul(buf, 10, &val); | |
210 | if (err || val != 0) | |
211 | return -EINVAL; | |
212 | ||
213 | down_read(&zram->init_lock); | |
5a99e95b WY |
214 | if (init_done(zram)) { |
215 | struct zram_meta *meta = zram->meta; | |
461a8eee MK |
216 | atomic_long_set(&zram->stats.max_used_pages, |
217 | zs_get_total_pages(meta->mem_pool)); | |
5a99e95b | 218 | } |
461a8eee MK |
219 | up_read(&zram->init_lock); |
220 | ||
221 | return len; | |
222 | } | |
223 | ||
beca3ec7 SS |
224 | static ssize_t max_comp_streams_store(struct device *dev, |
225 | struct device_attribute *attr, const char *buf, size_t len) | |
226 | { | |
227 | int num; | |
228 | struct zram *zram = dev_to_zram(dev); | |
60a726e3 | 229 | int ret; |
beca3ec7 | 230 | |
60a726e3 MK |
231 | ret = kstrtoint(buf, 0, &num); |
232 | if (ret < 0) | |
233 | return ret; | |
beca3ec7 SS |
234 | if (num < 1) |
235 | return -EINVAL; | |
60a726e3 | 236 | |
beca3ec7 SS |
237 | down_write(&zram->init_lock); |
238 | if (init_done(zram)) { | |
60a726e3 | 239 | if (!zcomp_set_max_streams(zram->comp, num)) { |
fe8eb122 | 240 | pr_info("Cannot change max compression streams\n"); |
60a726e3 MK |
241 | ret = -EINVAL; |
242 | goto out; | |
243 | } | |
beca3ec7 | 244 | } |
60a726e3 | 245 | |
beca3ec7 | 246 | zram->max_comp_streams = num; |
60a726e3 MK |
247 | ret = len; |
248 | out: | |
beca3ec7 | 249 | up_write(&zram->init_lock); |
60a726e3 | 250 | return ret; |
beca3ec7 SS |
251 | } |
252 | ||
e46b8a03 SS |
253 | static ssize_t comp_algorithm_show(struct device *dev, |
254 | struct device_attribute *attr, char *buf) | |
255 | { | |
256 | size_t sz; | |
257 | struct zram *zram = dev_to_zram(dev); | |
258 | ||
259 | down_read(&zram->init_lock); | |
260 | sz = zcomp_available_show(zram->compressor, buf); | |
261 | up_read(&zram->init_lock); | |
262 | ||
263 | return sz; | |
264 | } | |
265 | ||
266 | static ssize_t comp_algorithm_store(struct device *dev, | |
267 | struct device_attribute *attr, const char *buf, size_t len) | |
268 | { | |
269 | struct zram *zram = dev_to_zram(dev); | |
270 | down_write(&zram->init_lock); | |
271 | if (init_done(zram)) { | |
272 | up_write(&zram->init_lock); | |
273 | pr_info("Can't change algorithm for initialized device\n"); | |
274 | return -EBUSY; | |
275 | } | |
276 | strlcpy(zram->compressor, buf, sizeof(zram->compressor)); | |
277 | up_write(&zram->init_lock); | |
278 | return len; | |
279 | } | |
280 | ||
92967471 | 281 | /* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) */ |
8b3cc3ed | 282 | static int zram_test_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 283 | enum zram_pageflags flag) |
306b0c95 | 284 | { |
d2d5e762 | 285 | return meta->table[index].value & BIT(flag); |
306b0c95 NG |
286 | } |
287 | ||
8b3cc3ed | 288 | static void zram_set_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 289 | enum zram_pageflags flag) |
306b0c95 | 290 | { |
d2d5e762 | 291 | meta->table[index].value |= BIT(flag); |
306b0c95 NG |
292 | } |
293 | ||
8b3cc3ed | 294 | static void zram_clear_flag(struct zram_meta *meta, u32 index, |
f1e3cfff | 295 | enum zram_pageflags flag) |
306b0c95 | 296 | { |
d2d5e762 WY |
297 | meta->table[index].value &= ~BIT(flag); |
298 | } | |
299 | ||
300 | static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) | |
301 | { | |
302 | return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); | |
303 | } | |
304 | ||
305 | static void zram_set_obj_size(struct zram_meta *meta, | |
306 | u32 index, size_t size) | |
307 | { | |
308 | unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT; | |
309 | ||
310 | meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; | |
306b0c95 NG |
311 | } |
312 | ||
9b3bb7ab SS |
313 | static inline int is_partial_io(struct bio_vec *bvec) |
314 | { | |
315 | return bvec->bv_len != PAGE_SIZE; | |
316 | } | |
317 | ||
318 | /* | |
319 | * Check if request is within bounds and aligned on zram logical blocks. | |
320 | */ | |
54850e73 | 321 | static inline int valid_io_request(struct zram *zram, |
322 | sector_t start, unsigned int size) | |
9b3bb7ab | 323 | { |
54850e73 | 324 | u64 end, bound; |
a539c72a | 325 | |
9b3bb7ab | 326 | /* unaligned request */ |
54850e73 | 327 | if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) |
9b3bb7ab | 328 | return 0; |
54850e73 | 329 | if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) |
9b3bb7ab SS |
330 | return 0; |
331 | ||
54850e73 | 332 | end = start + (size >> SECTOR_SHIFT); |
9b3bb7ab SS |
333 | bound = zram->disksize >> SECTOR_SHIFT; |
334 | /* out of range */ | |
75c7caf5 | 335 | if (unlikely(start >= bound || end > bound || start > end)) |
9b3bb7ab SS |
336 | return 0; |
337 | ||
338 | /* I/O request is valid */ | |
339 | return 1; | |
340 | } | |
341 | ||
1fec1172 | 342 | static void zram_meta_free(struct zram_meta *meta, u64 disksize) |
9b3bb7ab | 343 | { |
1fec1172 GM |
344 | size_t num_pages = disksize >> PAGE_SHIFT; |
345 | size_t index; | |
346 | ||
347 | /* Free all pages that are still in this zram device */ | |
348 | for (index = 0; index < num_pages; index++) { | |
349 | unsigned long handle = meta->table[index].handle; | |
350 | ||
351 | if (!handle) | |
352 | continue; | |
353 | ||
354 | zs_free(meta->mem_pool, handle); | |
355 | } | |
356 | ||
9b3bb7ab | 357 | zs_destroy_pool(meta->mem_pool); |
9b3bb7ab SS |
358 | vfree(meta->table); |
359 | kfree(meta); | |
360 | } | |
361 | ||
3eba0c6a | 362 | static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) |
9b3bb7ab SS |
363 | { |
364 | size_t num_pages; | |
3eba0c6a | 365 | char pool_name[8]; |
9b3bb7ab | 366 | struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); |
b8179958 | 367 | |
9b3bb7ab | 368 | if (!meta) |
b8179958 | 369 | return NULL; |
9b3bb7ab | 370 | |
9b3bb7ab SS |
371 | num_pages = disksize >> PAGE_SHIFT; |
372 | meta->table = vzalloc(num_pages * sizeof(*meta->table)); | |
373 | if (!meta->table) { | |
374 | pr_err("Error allocating zram address table\n"); | |
b8179958 | 375 | goto out_error; |
9b3bb7ab SS |
376 | } |
377 | ||
3eba0c6a GM |
378 | snprintf(pool_name, sizeof(pool_name), "zram%d", device_id); |
379 | meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); | |
9b3bb7ab SS |
380 | if (!meta->mem_pool) { |
381 | pr_err("Error creating memory pool\n"); | |
b8179958 | 382 | goto out_error; |
9b3bb7ab SS |
383 | } |
384 | ||
385 | return meta; | |
386 | ||
b8179958 | 387 | out_error: |
9b3bb7ab | 388 | vfree(meta->table); |
9b3bb7ab | 389 | kfree(meta); |
b8179958 | 390 | return NULL; |
9b3bb7ab SS |
391 | } |
392 | ||
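/*
 * I/O paths pin the device metadata with zram_meta_get()
 * (atomic_inc_not_zero on zram->refcount); zram_reset_device() drops
 * the initial reference and waits for the count to hit zero before
 * freeing the metadata, after which zram_meta_get() fails.
 */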
08eee69f MK |
393 | static inline bool zram_meta_get(struct zram *zram) |
394 | { | |
395 | if (atomic_inc_not_zero(&zram->refcount)) | |
396 | return true; | |
397 | return false; | |
398 | } | |
399 | ||
400 | static inline void zram_meta_put(struct zram *zram) | |
401 | { | |
402 | atomic_dec(&zram->refcount); | |
403 | } | |
404 | ||
9b3bb7ab SS |
405 | static void update_position(u32 *index, int *offset, struct bio_vec *bvec) |
406 | { | |
407 | if (*offset + bvec->bv_len >= PAGE_SIZE) | |
408 | (*index)++; | |
409 | *offset = (*offset + bvec->bv_len) % PAGE_SIZE; | |
410 | } | |
411 | ||
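/* Return 1 if the page at @ptr contains only zero bytes, 0 otherwise. */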
306b0c95 NG |
412 | static int page_zero_filled(void *ptr) |
413 | { | |
414 | unsigned int pos; | |
415 | unsigned long *page; | |
416 | ||
417 | page = (unsigned long *)ptr; | |
418 | ||
419 | for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) { | |
420 | if (page[pos]) | |
421 | return 0; | |
422 | } | |
423 | ||
424 | return 1; | |
425 | } | |
426 | ||
9b3bb7ab SS |
427 | static void handle_zero_page(struct bio_vec *bvec) |
428 | { | |
429 | struct page *page = bvec->bv_page; | |
430 | void *user_mem; | |
431 | ||
432 | user_mem = kmap_atomic(page); | |
433 | if (is_partial_io(bvec)) | |
434 | memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); | |
435 | else | |
436 | clear_page(user_mem); | |
437 | kunmap_atomic(user_mem); | |
438 | ||
439 | flush_dcache_page(page); | |
440 | } | |
441 | ||
d2d5e762 WY |
442 | |
443 | /* | |
444 | * To protect concurrent access to the same index entry, | |
445 | * callers should hold the table entry's bit_spinlock while the | |
446 | * entry is being accessed. | |
447 | */ | |
f1e3cfff | 448 | static void zram_free_page(struct zram *zram, size_t index) |
306b0c95 | 449 | { |
8b3cc3ed MK |
450 | struct zram_meta *meta = zram->meta; |
451 | unsigned long handle = meta->table[index].handle; | |
306b0c95 | 452 | |
fd1a30de | 453 | if (unlikely(!handle)) { |
2e882281 NG |
454 | /* |
455 | * No memory is allocated for zero filled pages. | |
456 | * Simply clear zero page flag. | |
457 | */ | |
8b3cc3ed MK |
458 | if (zram_test_flag(meta, index, ZRAM_ZERO)) { |
459 | zram_clear_flag(meta, index, ZRAM_ZERO); | |
90a7806e | 460 | atomic64_dec(&zram->stats.zero_pages); |
306b0c95 NG |
461 | } |
462 | return; | |
463 | } | |
464 | ||
8b3cc3ed | 465 | zs_free(meta->mem_pool, handle); |
306b0c95 | 466 | |
d2d5e762 WY |
467 | atomic64_sub(zram_get_obj_size(meta, index), |
468 | &zram->stats.compr_data_size); | |
90a7806e | 469 | atomic64_dec(&zram->stats.pages_stored); |
306b0c95 | 470 | |
8b3cc3ed | 471 | meta->table[index].handle = 0; |
d2d5e762 | 472 | zram_set_obj_size(meta, index, 0); |
306b0c95 NG |
473 | } |
474 | ||
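/*
 * Decompress the object stored at @index into @mem (one full page).
 * Unallocated slots and ZRAM_ZERO pages are simply cleared.
 */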
37b51fdd | 475 | static int zram_decompress_page(struct zram *zram, char *mem, u32 index) |
306b0c95 | 476 | { |
b7ca232e | 477 | int ret = 0; |
37b51fdd | 478 | unsigned char *cmem; |
8b3cc3ed | 479 | struct zram_meta *meta = zram->meta; |
92967471 | 480 | unsigned long handle; |
023b409f | 481 | size_t size; |
92967471 | 482 | |
d2d5e762 | 483 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
92967471 | 484 | handle = meta->table[index].handle; |
d2d5e762 | 485 | size = zram_get_obj_size(meta, index); |
306b0c95 | 486 | |
8b3cc3ed | 487 | if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { |
d2d5e762 | 488 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
42e99bd9 | 489 | clear_page(mem); |
8c921b2b JM |
490 | return 0; |
491 | } | |
306b0c95 | 492 | |
8b3cc3ed | 493 | cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); |
92967471 | 494 | if (size == PAGE_SIZE) |
42e99bd9 | 495 | copy_page(mem, cmem); |
37b51fdd | 496 | else |
b7ca232e | 497 | ret = zcomp_decompress(zram->comp, cmem, size, mem); |
8b3cc3ed | 498 | zs_unmap_object(meta->mem_pool, handle); |
d2d5e762 | 499 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
a1dd52af | 500 | |
8c921b2b | 501 | /* Should NEVER happen. Return bio error if it does. */ |
b7ca232e | 502 | if (unlikely(ret)) { |
8c921b2b | 503 | pr_err("Decompression failed! err=%d, page=%u\n", ret, index); |
8c921b2b | 504 | return ret; |
a1dd52af | 505 | } |
306b0c95 | 506 | |
8c921b2b | 507 | return 0; |
306b0c95 NG |
508 | } |
509 | ||
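/*
 * Read handler for a single bio_vec. Partial (sub-page) reads
 * decompress into a temporary buffer and copy out only the
 * requested range.
 */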
37b51fdd | 510 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
b627cff3 | 511 | u32 index, int offset) |
924bd88d JM |
512 | { |
513 | int ret; | |
37b51fdd SS |
514 | struct page *page; |
515 | unsigned char *user_mem, *uncmem = NULL; | |
8b3cc3ed | 516 | struct zram_meta *meta = zram->meta; |
37b51fdd SS |
517 | page = bvec->bv_page; |
518 | ||
d2d5e762 | 519 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
8b3cc3ed MK |
520 | if (unlikely(!meta->table[index].handle) || |
521 | zram_test_flag(meta, index, ZRAM_ZERO)) { | |
d2d5e762 | 522 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
37b51fdd | 523 | handle_zero_page(bvec); |
924bd88d JM |
524 | return 0; |
525 | } | |
d2d5e762 | 526 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
924bd88d | 527 | |
37b51fdd SS |
528 | if (is_partial_io(bvec)) |
529 | /* Use a temporary buffer to decompress the page */ | |
7e5a5104 MK |
530 | uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
531 | ||
532 | user_mem = kmap_atomic(page); | |
533 | if (!is_partial_io(bvec)) | |
37b51fdd SS |
534 | uncmem = user_mem; |
535 | ||
536 | if (!uncmem) { | |
537 | pr_info("Unable to allocate temp memory\n"); | |
538 | ret = -ENOMEM; | |
539 | goto out_cleanup; | |
540 | } | |
924bd88d | 541 | |
37b51fdd | 542 | ret = zram_decompress_page(zram, uncmem, index); |
924bd88d | 543 | /* Should NEVER happen. Return bio error if it does. */ |
b7ca232e | 544 | if (unlikely(ret)) |
37b51fdd | 545 | goto out_cleanup; |
924bd88d | 546 | |
37b51fdd SS |
547 | if (is_partial_io(bvec)) |
548 | memcpy(user_mem + bvec->bv_offset, uncmem + offset, | |
549 | bvec->bv_len); | |
550 | ||
551 | flush_dcache_page(page); | |
552 | ret = 0; | |
553 | out_cleanup: | |
554 | kunmap_atomic(user_mem); | |
555 | if (is_partial_io(bvec)) | |
556 | kfree(uncmem); | |
557 | return ret; | |
924bd88d JM |
558 | } |
559 | ||
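/*
 * Racelessly raise stats.max_used_pages to @pages with a cmpxchg
 * loop, so the peak-usage watermark never moves backwards.
 */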
461a8eee MK |
560 | static inline void update_used_max(struct zram *zram, |
561 | const unsigned long pages) | |
562 | { | |
2ea55a2c | 563 | unsigned long old_max, cur_max; |
461a8eee MK |
564 | |
565 | old_max = atomic_long_read(&zram->stats.max_used_pages); | |
566 | ||
567 | do { | |
568 | cur_max = old_max; | |
569 | if (pages > cur_max) | |
570 | old_max = atomic_long_cmpxchg( | |
571 | &zram->stats.max_used_pages, cur_max, pages); | |
572 | } while (old_max != cur_max); | |
573 | } | |
574 | ||
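/*
 * Compress and store one bio_vec at @index. Zero-filled pages are
 * recorded with the ZRAM_ZERO flag and use no pool memory; pages that
 * compress poorly (clen > max_zpage_size) are stored uncompressed.
 */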
924bd88d JM |
575 | static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, |
576 | int offset) | |
306b0c95 | 577 | { |
397c6066 | 578 | int ret = 0; |
8c921b2b | 579 | size_t clen; |
c2344348 | 580 | unsigned long handle; |
130f315a | 581 | struct page *page; |
924bd88d | 582 | unsigned char *user_mem, *cmem, *src, *uncmem = NULL; |
8b3cc3ed | 583 | struct zram_meta *meta = zram->meta; |
b7ca232e | 584 | struct zcomp_strm *zstrm; |
e46e3315 | 585 | bool locked = false; |
461a8eee | 586 | unsigned long alloced_pages; |
306b0c95 | 587 | |
8c921b2b | 588 | page = bvec->bv_page; |
924bd88d JM |
589 | if (is_partial_io(bvec)) { |
590 | /* | |
591 | * This is a partial IO. We need to read the full page | |
592 | * before writing the changes. | |
593 | */ | |
7e5a5104 | 594 | uncmem = kmalloc(PAGE_SIZE, GFP_NOIO); |
924bd88d | 595 | if (!uncmem) { |
924bd88d JM |
596 | ret = -ENOMEM; |
597 | goto out; | |
598 | } | |
37b51fdd | 599 | ret = zram_decompress_page(zram, uncmem, index); |
397c6066 | 600 | if (ret) |
924bd88d | 601 | goto out; |
924bd88d JM |
602 | } |
603 | ||
b7ca232e | 604 | zstrm = zcomp_strm_find(zram->comp); |
e46e3315 | 605 | locked = true; |
ba82fe2e | 606 | user_mem = kmap_atomic(page); |
924bd88d | 607 | |
397c6066 | 608 | if (is_partial_io(bvec)) { |
924bd88d JM |
609 | memcpy(uncmem + offset, user_mem + bvec->bv_offset, |
610 | bvec->bv_len); | |
397c6066 NG |
611 | kunmap_atomic(user_mem); |
612 | user_mem = NULL; | |
613 | } else { | |
924bd88d | 614 | uncmem = user_mem; |
397c6066 | 615 | } |
924bd88d JM |
616 | |
617 | if (page_zero_filled(uncmem)) { | |
c4065152 WY |
618 | if (user_mem) |
619 | kunmap_atomic(user_mem); | |
f40ac2ae | 620 | /* Free memory associated with this sector now. */ |
d2d5e762 | 621 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
f40ac2ae | 622 | zram_free_page(zram, index); |
92967471 | 623 | zram_set_flag(meta, index, ZRAM_ZERO); |
d2d5e762 | 624 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
f40ac2ae | 625 | |
90a7806e | 626 | atomic64_inc(&zram->stats.zero_pages); |
924bd88d JM |
627 | ret = 0; |
628 | goto out; | |
8c921b2b | 629 | } |
306b0c95 | 630 | |
b7ca232e | 631 | ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); |
397c6066 NG |
632 | if (!is_partial_io(bvec)) { |
633 | kunmap_atomic(user_mem); | |
634 | user_mem = NULL; | |
635 | uncmem = NULL; | |
636 | } | |
306b0c95 | 637 | |
b7ca232e | 638 | if (unlikely(ret)) { |
8c921b2b | 639 | pr_err("Compression failed! err=%d\n", ret); |
924bd88d | 640 | goto out; |
8c921b2b | 641 | } |
b7ca232e | 642 | src = zstrm->buffer; |
c8f2f0db | 643 | if (unlikely(clen > max_zpage_size)) { |
c8f2f0db | 644 | clen = PAGE_SIZE; |
397c6066 NG |
645 | if (is_partial_io(bvec)) |
646 | src = uncmem; | |
c8f2f0db | 647 | } |
a1dd52af | 648 | |
8b3cc3ed | 649 | handle = zs_malloc(meta->mem_pool, clen); |
fd1a30de | 650 | if (!handle) { |
596b3dd4 MR |
651 | pr_info("Error allocating memory for compressed page: %u, size=%zu\n", |
652 | index, clen); | |
924bd88d JM |
653 | ret = -ENOMEM; |
654 | goto out; | |
8c921b2b | 655 | } |
9ada9da9 | 656 | |
461a8eee MK |
657 | alloced_pages = zs_get_total_pages(meta->mem_pool); |
658 | if (zram->limit_pages && alloced_pages > zram->limit_pages) { | |
9ada9da9 MK |
659 | zs_free(meta->mem_pool, handle); |
660 | ret = -ENOMEM; | |
661 | goto out; | |
662 | } | |
663 | ||
461a8eee MK |
664 | update_used_max(zram, alloced_pages); |
665 | ||
8b3cc3ed | 666 | cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); |
306b0c95 | 667 | |
42e99bd9 | 668 | if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) { |
397c6066 | 669 | src = kmap_atomic(page); |
42e99bd9 | 670 | copy_page(cmem, src); |
397c6066 | 671 | kunmap_atomic(src); |
42e99bd9 JL |
672 | } else { |
673 | memcpy(cmem, src, clen); | |
674 | } | |
306b0c95 | 675 | |
b7ca232e SS |
676 | zcomp_strm_release(zram->comp, zstrm); |
677 | locked = false; | |
8b3cc3ed | 678 | zs_unmap_object(meta->mem_pool, handle); |
fd1a30de | 679 | |
f40ac2ae SS |
680 | /* |
681 | * Free memory associated with this sector | |
682 | * before overwriting unused sectors. | |
683 | */ | |
d2d5e762 | 684 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
f40ac2ae SS |
685 | zram_free_page(zram, index); |
686 | ||
8b3cc3ed | 687 | meta->table[index].handle = handle; |
d2d5e762 WY |
688 | zram_set_obj_size(meta, index, clen); |
689 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); | |
306b0c95 | 690 | |
8c921b2b | 691 | /* Update stats */ |
90a7806e SS |
692 | atomic64_add(clen, &zram->stats.compr_data_size); |
693 | atomic64_inc(&zram->stats.pages_stored); | |
924bd88d | 694 | out: |
e46e3315 | 695 | if (locked) |
b7ca232e | 696 | zcomp_strm_release(zram->comp, zstrm); |
397c6066 NG |
697 | if (is_partial_io(bvec)) |
698 | kfree(uncmem); | |
924bd88d | 699 | return ret; |
8c921b2b JM |
700 | } |
701 | ||
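/*
 * Dispatch a single-page read or write and account it both in the
 * generic block-layer statistics (disk->part0) and in zram's own
 * counters.
 */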
702 | static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, | |
b627cff3 | 703 | int offset, int rw) |
8c921b2b | 704 | { |
8811a942 | 705 | unsigned long start_time = jiffies; |
c5bde238 | 706 | int ret; |
8c921b2b | 707 | |
8811a942 SS |
708 | generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT, |
709 | &zram->disk->part0); | |
710 | ||
be257c61 SS |
711 | if (rw == READ) { |
712 | atomic64_inc(&zram->stats.num_reads); | |
b627cff3 | 713 | ret = zram_bvec_read(zram, bvec, index, offset); |
be257c61 SS |
714 | } else { |
715 | atomic64_inc(&zram->stats.num_writes); | |
c5bde238 | 716 | ret = zram_bvec_write(zram, bvec, index, offset); |
be257c61 | 717 | } |
c5bde238 | 718 | |
8811a942 SS |
719 | generic_end_io_acct(rw, &zram->disk->part0, start_time); |
720 | ||
0cf1e9d6 CY |
721 | if (unlikely(ret)) { |
722 | if (rw == READ) | |
723 | atomic64_inc(&zram->stats.failed_reads); | |
724 | else | |
725 | atomic64_inc(&zram->stats.failed_writes); | |
726 | } | |
727 | ||
c5bde238 | 728 | return ret; |
924bd88d JM |
729 | } |
730 | ||
f4659d8e JK |
731 | /* |
732 | * zram_bio_discard - handler on discard request | |
733 | * @index: physical block index in PAGE_SIZE units | |
734 | * @offset: byte offset within physical block | |
735 | */ | |
736 | static void zram_bio_discard(struct zram *zram, u32 index, | |
737 | int offset, struct bio *bio) | |
738 | { | |
739 | size_t n = bio->bi_iter.bi_size; | |
d2d5e762 | 740 | struct zram_meta *meta = zram->meta; |
f4659d8e JK |
741 | |
742 | /* | |
743 | * zram manages data in physical block size units. Because logical block | |
744 | * size isn't identical with physical block size on some arch, we | |
745 | * could get a discard request pointing to a specific offset within a | |
746 | * certain physical block. Although we could handle such a request by | |
747 | * reading that physical block, decompressing it, partially zeroing it, | |
748 | * and then re-compressing and re-storing it, doing so isn't reasonable | |
749 | * because the intent of a discard request is to save memory. So | |
750 | * skipping this logical block is appropriate here. | |
751 | */ | |
752 | if (offset) { | |
38515c73 | 753 | if (n <= (PAGE_SIZE - offset)) |
f4659d8e JK |
754 | return; |
755 | ||
38515c73 | 756 | n -= (PAGE_SIZE - offset); |
f4659d8e JK |
757 | index++; |
758 | } | |
759 | ||
760 | while (n >= PAGE_SIZE) { | |
d2d5e762 | 761 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
f4659d8e | 762 | zram_free_page(zram, index); |
d2d5e762 | 763 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
015254da | 764 | atomic64_inc(&zram->stats.notify_free); |
f4659d8e JK |
765 | index++; |
766 | n -= PAGE_SIZE; | |
767 | } | |
768 | } | |
769 | ||
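/*
 * Return the device to its unconfigured state: drop the memory limit,
 * wait for in-flight I/O to drain, clear stats and capacity, then free
 * the metadata and compression backend outside init_lock.
 */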
ba6b17d6 | 770 | static void zram_reset_device(struct zram *zram) |
924bd88d | 771 | { |
08eee69f MK |
772 | struct zram_meta *meta; |
773 | struct zcomp *comp; | |
774 | u64 disksize; | |
775 | ||
644d4787 | 776 | down_write(&zram->init_lock); |
9ada9da9 MK |
777 | |
778 | zram->limit_pages = 0; | |
779 | ||
be2d1d56 | 780 | if (!init_done(zram)) { |
644d4787 | 781 | up_write(&zram->init_lock); |
9b3bb7ab | 782 | return; |
644d4787 | 783 | } |
9b3bb7ab | 784 | |
08eee69f MK |
785 | meta = zram->meta; |
786 | comp = zram->comp; | |
787 | disksize = zram->disksize; | |
788 | /* | |
789 | * The refcount will eventually drop to 0; from then on the r/w | |
790 | * handlers cannot accept further I/O and bail out when | |
791 | * zram_meta_get() fails. | |
792 | */ | |
793 | zram_meta_put(zram); | |
794 | /* | |
795 | * We want to free zram_meta in process context to avoid | |
796 | * deadlock between reclaim path and any other locks. | |
797 | */ | |
798 | wait_event(zram->io_done, atomic_read(&zram->refcount) == 0); | |
799 | ||
9b3bb7ab SS |
800 | /* Reset stats */ |
801 | memset(&zram->stats, 0, sizeof(zram->stats)); | |
9b3bb7ab | 802 | zram->disksize = 0; |
08eee69f | 803 | zram->max_comp_streams = 1; |
d7ad41a1 | 804 | |
a096cafc | 805 | set_capacity(zram->disk, 0); |
d7ad41a1 | 806 | part_stat_set_all(&zram->disk->part0, 0); |
a096cafc | 807 | |
644d4787 | 808 | up_write(&zram->init_lock); |
08eee69f MK |
809 | /* All in-flight I/O on every CPU has finished, so it's safe to free */ |
810 | zram_meta_free(meta, disksize); | |
811 | zcomp_destroy(comp); | |
9b3bb7ab SS |
812 | } |
813 | ||
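/*
 * Configure an idle device: allocate the zsmalloc-backed metadata,
 * create the compression backend, then publish the new capacity.
 */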
9b3bb7ab SS |
814 | static ssize_t disksize_store(struct device *dev, |
815 | struct device_attribute *attr, const char *buf, size_t len) | |
816 | { | |
817 | u64 disksize; | |
d61f98c7 | 818 | struct zcomp *comp; |
9b3bb7ab SS |
819 | struct zram_meta *meta; |
820 | struct zram *zram = dev_to_zram(dev); | |
fcfa8d95 | 821 | int err; |
9b3bb7ab SS |
822 | |
823 | disksize = memparse(buf, NULL); | |
824 | if (!disksize) | |
825 | return -EINVAL; | |
826 | ||
827 | disksize = PAGE_ALIGN(disksize); | |
3eba0c6a | 828 | meta = zram_meta_alloc(zram->disk->first_minor, disksize); |
db5d711e MK |
829 | if (!meta) |
830 | return -ENOMEM; | |
b67d1ec1 | 831 | |
d61f98c7 | 832 | comp = zcomp_create(zram->compressor, zram->max_comp_streams); |
fcfa8d95 | 833 | if (IS_ERR(comp)) { |
d61f98c7 SS |
834 | pr_info("Cannot initialise %s compressing backend\n", |
835 | zram->compressor); | |
fcfa8d95 SS |
836 | err = PTR_ERR(comp); |
837 | goto out_free_meta; | |
d61f98c7 SS |
838 | } |
839 | ||
9b3bb7ab | 840 | down_write(&zram->init_lock); |
be2d1d56 | 841 | if (init_done(zram)) { |
9b3bb7ab | 842 | pr_info("Cannot change disksize for initialized device\n"); |
b7ca232e | 843 | err = -EBUSY; |
fcfa8d95 | 844 | goto out_destroy_comp; |
9b3bb7ab SS |
845 | } |
846 | ||
08eee69f MK |
847 | init_waitqueue_head(&zram->io_done); |
848 | atomic_set(&zram->refcount, 1); | |
b67d1ec1 | 849 | zram->meta = meta; |
d61f98c7 | 850 | zram->comp = comp; |
9b3bb7ab SS |
851 | zram->disksize = disksize; |
852 | set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); | |
9b3bb7ab | 853 | up_write(&zram->init_lock); |
b4c5c609 MK |
854 | |
855 | /* | |
856 | * Revalidate disk out of the init_lock to avoid lockdep splat. | |
857 | * It's okay because disk's capacity is protected by init_lock | |
858 | * so that revalidate_disk always sees up-to-date capacity. | |
859 | */ | |
860 | revalidate_disk(zram->disk); | |
861 | ||
9b3bb7ab | 862 | return len; |
b7ca232e | 863 | |
fcfa8d95 SS |
864 | out_destroy_comp: |
865 | up_write(&zram->init_lock); | |
866 | zcomp_destroy(comp); | |
867 | out_free_meta: | |
1fec1172 | 868 | zram_meta_free(meta, disksize); |
b7ca232e | 869 | return err; |
9b3bb7ab SS |
870 | } |
871 | ||
872 | static ssize_t reset_store(struct device *dev, | |
873 | struct device_attribute *attr, const char *buf, size_t len) | |
874 | { | |
875 | int ret; | |
876 | unsigned short do_reset; | |
877 | struct zram *zram; | |
878 | struct block_device *bdev; | |
879 | ||
880 | zram = dev_to_zram(dev); | |
881 | bdev = bdget_disk(zram->disk, 0); | |
882 | ||
46a51c80 RK |
883 | if (!bdev) |
884 | return -ENOMEM; | |
885 | ||
ba6b17d6 | 886 | mutex_lock(&bdev->bd_mutex); |
9b3bb7ab | 887 | /* Do not reset an active device! */ |
2b269ce6 | 888 | if (bdev->bd_openers) { |
1b672224 RK |
889 | ret = -EBUSY; |
890 | goto out; | |
891 | } | |
9b3bb7ab SS |
892 | |
893 | ret = kstrtou16(buf, 10, &do_reset); | |
894 | if (ret) | |
1b672224 | 895 | goto out; |
9b3bb7ab | 896 | |
1b672224 RK |
897 | if (!do_reset) { |
898 | ret = -EINVAL; | |
899 | goto out; | |
900 | } | |
9b3bb7ab SS |
901 | |
902 | /* Make sure all pending I/O is finished */ | |
46a51c80 | 903 | fsync_bdev(bdev); |
ba6b17d6 | 904 | zram_reset_device(zram); |
ba6b17d6 SS |
905 | |
906 | mutex_unlock(&bdev->bd_mutex); | |
907 | revalidate_disk(zram->disk); | |
1b672224 | 908 | bdput(bdev); |
9b3bb7ab | 909 | |
9b3bb7ab | 910 | return len; |
1b672224 RK |
911 | |
912 | out: | |
ba6b17d6 | 913 | mutex_unlock(&bdev->bd_mutex); |
1b672224 RK |
914 | bdput(bdev); |
915 | return ret; | |
8c921b2b JM |
916 | } |
917 | ||
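/*
 * Walk the bio segment by segment, splitting any bio_vec that crosses
 * a page boundary so zram_bvec_rw() only ever sees single-page
 * operations.
 */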
be257c61 | 918 | static void __zram_make_request(struct zram *zram, struct bio *bio) |
8c921b2b | 919 | { |
b627cff3 | 920 | int offset, rw; |
8c921b2b | 921 | u32 index; |
7988613b KO |
922 | struct bio_vec bvec; |
923 | struct bvec_iter iter; | |
8c921b2b | 924 | |
4f024f37 KO |
925 | index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
926 | offset = (bio->bi_iter.bi_sector & | |
927 | (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; | |
8c921b2b | 928 | |
f4659d8e JK |
929 | if (unlikely(bio->bi_rw & REQ_DISCARD)) { |
930 | zram_bio_discard(zram, index, offset, bio); | |
931 | bio_endio(bio, 0); | |
932 | return; | |
933 | } | |
934 | ||
b627cff3 | 935 | rw = bio_data_dir(bio); |
7988613b | 936 | bio_for_each_segment(bvec, bio, iter) { |
924bd88d JM |
937 | int max_transfer_size = PAGE_SIZE - offset; |
938 | ||
7988613b | 939 | if (bvec.bv_len > max_transfer_size) { |
924bd88d JM |
940 | /* |
941 | * zram_bvec_rw() can only operate on a single | |
942 | * zram page. Split the bio vector. | |
943 | */ | |
944 | struct bio_vec bv; | |
945 | ||
7988613b | 946 | bv.bv_page = bvec.bv_page; |
924bd88d | 947 | bv.bv_len = max_transfer_size; |
7988613b | 948 | bv.bv_offset = bvec.bv_offset; |
924bd88d | 949 | |
b627cff3 | 950 | if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) |
924bd88d JM |
951 | goto out; |
952 | ||
7988613b | 953 | bv.bv_len = bvec.bv_len - max_transfer_size; |
924bd88d | 954 | bv.bv_offset += max_transfer_size; |
b627cff3 | 955 | if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) |
924bd88d JM |
956 | goto out; |
957 | } else | |
b627cff3 | 958 | if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) |
924bd88d JM |
959 | goto out; |
960 | ||
7988613b | 961 | update_position(&index, &offset, &bvec); |
a1dd52af | 962 | } |
306b0c95 NG |
963 | |
964 | set_bit(BIO_UPTODATE, &bio->bi_flags); | |
965 | bio_endio(bio, 0); | |
7d7854b4 | 966 | return; |
306b0c95 NG |
967 | |
968 | out: | |
306b0c95 | 969 | bio_io_error(bio); |
306b0c95 NG |
970 | } |
971 | ||
306b0c95 | 972 | /* |
f1e3cfff | 973 | * Handler function for all zram I/O requests. |
306b0c95 | 974 | */ |
5a7bbad2 | 975 | static void zram_make_request(struct request_queue *queue, struct bio *bio) |
306b0c95 | 976 | { |
f1e3cfff | 977 | struct zram *zram = queue->queuedata; |
306b0c95 | 978 | |
08eee69f | 979 | if (unlikely(!zram_meta_get(zram))) |
3de738cd | 980 | goto error; |
0900beae | 981 | |
54850e73 | 982 | if (!valid_io_request(zram, bio->bi_iter.bi_sector, |
983 | bio->bi_iter.bi_size)) { | |
da5cc7d3 | 984 | atomic64_inc(&zram->stats.invalid_io); |
08eee69f | 985 | goto put_zram; |
6642a67c JM |
986 | } |
987 | ||
be257c61 | 988 | __zram_make_request(zram, bio); |
08eee69f | 989 | zram_meta_put(zram); |
b4fdcb02 | 990 | return; |
08eee69f MK |
991 | put_zram: |
992 | zram_meta_put(zram); | |
0900beae JM |
993 | error: |
994 | bio_io_error(bio); | |
306b0c95 NG |
995 | } |
996 | ||
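/*
 * Called by the swap layer when a swap slot backed by this device is
 * freed: drop the compressed copy immediately rather than waiting for
 * the slot to be overwritten.
 */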
2ccbec05 NG |
997 | static void zram_slot_free_notify(struct block_device *bdev, |
998 | unsigned long index) | |
107c161b | 999 | { |
f1e3cfff | 1000 | struct zram *zram; |
f614a9f4 | 1001 | struct zram_meta *meta; |
107c161b | 1002 | |
f1e3cfff | 1003 | zram = bdev->bd_disk->private_data; |
f614a9f4 | 1004 | meta = zram->meta; |
a0c516cb | 1005 | |
d2d5e762 | 1006 | bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); |
f614a9f4 | 1007 | zram_free_page(zram, index); |
d2d5e762 | 1008 | bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); |
f614a9f4 | 1009 | atomic64_inc(&zram->stats.notify_free); |
107c161b NG |
1010 | } |
1011 | ||
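/*
 * .rw_page handler: synchronous single-page I/O without a bio. On
 * error the caller falls back to a normal bio submission (see the
 * comment at the end of this function).
 */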
8c7f0102 | 1012 | static int zram_rw_page(struct block_device *bdev, sector_t sector, |
1013 | struct page *page, int rw) | |
1014 | { | |
08eee69f | 1015 | int offset, err = -EIO; |
8c7f0102 | 1016 | u32 index; |
1017 | struct zram *zram; | |
1018 | struct bio_vec bv; | |
1019 | ||
1020 | zram = bdev->bd_disk->private_data; | |
08eee69f MK |
1021 | if (unlikely(!zram_meta_get(zram))) |
1022 | goto out; | |
1023 | ||
8c7f0102 | 1024 | if (!valid_io_request(zram, sector, PAGE_SIZE)) { |
1025 | atomic64_inc(&zram->stats.invalid_io); | |
08eee69f MK |
1026 | err = -EINVAL; |
1027 | goto put_zram; | |
8c7f0102 | 1028 | } |
1029 | ||
1030 | index = sector >> SECTORS_PER_PAGE_SHIFT; | |
1031 | offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT; | |
1032 | ||
1033 | bv.bv_page = page; | |
1034 | bv.bv_len = PAGE_SIZE; | |
1035 | bv.bv_offset = 0; | |
1036 | ||
1037 | err = zram_bvec_rw(zram, &bv, index, offset, rw); | |
08eee69f MK |
1038 | put_zram: |
1039 | zram_meta_put(zram); | |
1040 | out: | |
8c7f0102 | 1041 | /* |
1042 | * If the I/O fails, just return an error (i.e., non-zero) without | |
1043 | * calling page_endio. | |
1044 | * The callers of rw_page (e.g., swap_readpage, __swap_writepage) | |
1045 | * will then resubmit the I/O as a bio request, and | |
1046 | * bio->bi_end_io does the work needed to handle the error | |
1047 | * (e.g., SetPageError, set_page_dirty and other cleanup). | |
1048 | */ | |
1049 | if (err == 0) | |
1050 | page_endio(page, rw, 0); | |
1051 | return err; | |
1052 | } | |
1053 | ||
f1e3cfff | 1054 | static const struct block_device_operations zram_devops = { |
f1e3cfff | 1055 | .swap_slot_free_notify = zram_slot_free_notify, |
8c7f0102 | 1056 | .rw_page = zram_rw_page, |
107c161b | 1057 | .owner = THIS_MODULE |
306b0c95 NG |
1058 | }; |
1059 | ||
99ebbd30 | 1060 | static DEVICE_ATTR_WO(compact); |
083914ea GM |
1061 | static DEVICE_ATTR_RW(disksize); |
1062 | static DEVICE_ATTR_RO(initstate); | |
1063 | static DEVICE_ATTR_WO(reset); | |
1064 | static DEVICE_ATTR_RO(orig_data_size); | |
1065 | static DEVICE_ATTR_RO(mem_used_total); | |
1066 | static DEVICE_ATTR_RW(mem_limit); | |
1067 | static DEVICE_ATTR_RW(mem_used_max); | |
1068 | static DEVICE_ATTR_RW(max_comp_streams); | |
1069 | static DEVICE_ATTR_RW(comp_algorithm); | |
9b3bb7ab | 1070 | |
2f6a3bed SS |
1071 | static ssize_t io_stat_show(struct device *dev, |
1072 | struct device_attribute *attr, char *buf) | |
1073 | { | |
1074 | struct zram *zram = dev_to_zram(dev); | |
1075 | ssize_t ret; | |
1076 | ||
1077 | down_read(&zram->init_lock); | |
1078 | ret = scnprintf(buf, PAGE_SIZE, | |
1079 | "%8llu %8llu %8llu %8llu\n", | |
1080 | (u64)atomic64_read(&zram->stats.failed_reads), | |
1081 | (u64)atomic64_read(&zram->stats.failed_writes), | |
1082 | (u64)atomic64_read(&zram->stats.invalid_io), | |
1083 | (u64)atomic64_read(&zram->stats.notify_free)); | |
1084 | up_read(&zram->init_lock); | |
1085 | ||
1086 | return ret; | |
1087 | } | |
1088 | ||
4f2109f6 SS |
1089 | static ssize_t mm_stat_show(struct device *dev, |
1090 | struct device_attribute *attr, char *buf) | |
1091 | { | |
1092 | struct zram *zram = dev_to_zram(dev); | |
1093 | u64 orig_size, mem_used = 0; | |
1094 | long max_used; | |
1095 | ssize_t ret; | |
1096 | ||
1097 | down_read(&zram->init_lock); | |
1098 | if (init_done(zram)) | |
1099 | mem_used = zs_get_total_pages(zram->meta->mem_pool); | |
1100 | ||
1101 | orig_size = atomic64_read(&zram->stats.pages_stored); | |
1102 | max_used = atomic_long_read(&zram->stats.max_used_pages); | |
1103 | ||
1104 | ret = scnprintf(buf, PAGE_SIZE, | |
1105 | "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", | |
1106 | orig_size << PAGE_SHIFT, | |
1107 | (u64)atomic64_read(&zram->stats.compr_data_size), | |
1108 | mem_used << PAGE_SHIFT, | |
1109 | zram->limit_pages << PAGE_SHIFT, | |
1110 | max_used << PAGE_SHIFT, | |
1111 | (u64)atomic64_read(&zram->stats.zero_pages), | |
1112 | (u64)atomic64_read(&zram->stats.num_migrated)); | |
1113 | up_read(&zram->init_lock); | |
1114 | ||
1115 | return ret; | |
1116 | } | |
1117 | ||
2f6a3bed | 1118 | static DEVICE_ATTR_RO(io_stat); |
4f2109f6 | 1119 | static DEVICE_ATTR_RO(mm_stat); |
a68eb3b6 SS |
1120 | ZRAM_ATTR_RO(num_reads); |
1121 | ZRAM_ATTR_RO(num_writes); | |
64447249 SS |
1122 | ZRAM_ATTR_RO(failed_reads); |
1123 | ZRAM_ATTR_RO(failed_writes); | |
a68eb3b6 SS |
1124 | ZRAM_ATTR_RO(invalid_io); |
1125 | ZRAM_ATTR_RO(notify_free); | |
1126 | ZRAM_ATTR_RO(zero_pages); | |
1127 | ZRAM_ATTR_RO(compr_data_size); | |
1128 | ||
9b3bb7ab SS |
1129 | static struct attribute *zram_disk_attrs[] = { |
1130 | &dev_attr_disksize.attr, | |
1131 | &dev_attr_initstate.attr, | |
1132 | &dev_attr_reset.attr, | |
1133 | &dev_attr_num_reads.attr, | |
1134 | &dev_attr_num_writes.attr, | |
64447249 SS |
1135 | &dev_attr_failed_reads.attr, |
1136 | &dev_attr_failed_writes.attr, | |
99ebbd30 | 1137 | &dev_attr_compact.attr, |
9b3bb7ab SS |
1138 | &dev_attr_invalid_io.attr, |
1139 | &dev_attr_notify_free.attr, | |
1140 | &dev_attr_zero_pages.attr, | |
1141 | &dev_attr_orig_data_size.attr, | |
1142 | &dev_attr_compr_data_size.attr, | |
1143 | &dev_attr_mem_used_total.attr, | |
9ada9da9 | 1144 | &dev_attr_mem_limit.attr, |
461a8eee | 1145 | &dev_attr_mem_used_max.attr, |
beca3ec7 | 1146 | &dev_attr_max_comp_streams.attr, |
e46b8a03 | 1147 | &dev_attr_comp_algorithm.attr, |
2f6a3bed | 1148 | &dev_attr_io_stat.attr, |
4f2109f6 | 1149 | &dev_attr_mm_stat.attr, |
9b3bb7ab SS |
1150 | NULL, |
1151 | }; | |
1152 | ||
1153 | static struct attribute_group zram_disk_attr_group = { | |
1154 | .attrs = zram_disk_attrs, | |
1155 | }; | |
1156 | ||
f1e3cfff | 1157 | static int create_device(struct zram *zram, int device_id) |
306b0c95 | 1158 | { |
ee980160 | 1159 | struct request_queue *queue; |
39a9b8ac | 1160 | int ret = -ENOMEM; |
de1a21a0 | 1161 | |
0900beae | 1162 | init_rwsem(&zram->init_lock); |
306b0c95 | 1163 | |
ee980160 SS |
1164 | queue = blk_alloc_queue(GFP_KERNEL); |
1165 | if (!queue) { | |
306b0c95 NG |
1166 | pr_err("Error allocating disk queue for device %d\n", |
1167 | device_id); | |
de1a21a0 | 1168 | goto out; |
306b0c95 NG |
1169 | } |
1170 | ||
ee980160 | 1171 | blk_queue_make_request(queue, zram_make_request); |
306b0c95 NG |
1172 | |
1173 | /* gendisk structure */ | |
f1e3cfff NG |
1174 | zram->disk = alloc_disk(1); |
1175 | if (!zram->disk) { | |
94b8435f | 1176 | pr_warn("Error allocating disk structure for device %d\n", |
306b0c95 | 1177 | device_id); |
201c7b72 | 1178 | ret = -ENOMEM; |
39a9b8ac | 1179 | goto out_free_queue; |
306b0c95 NG |
1180 | } |
1181 | ||
f1e3cfff NG |
1182 | zram->disk->major = zram_major; |
1183 | zram->disk->first_minor = device_id; | |
1184 | zram->disk->fops = &zram_devops; | |
ee980160 SS |
1185 | zram->disk->queue = queue; |
1186 | zram->disk->queue->queuedata = zram; | |
f1e3cfff NG |
1187 | zram->disk->private_data = zram; |
1188 | snprintf(zram->disk->disk_name, 16, "zram%d", device_id); | |
306b0c95 | 1189 | |
33863c21 | 1190 | /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */ |
f1e3cfff | 1191 | set_capacity(zram->disk, 0); |
b67d1ec1 SS |
1192 | /* zram devices sort of resemble non-rotational disks */ |
1193 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); | |
b277da0a | 1194 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); |
a1dd52af NG |
1195 | /* |
1196 | * To ensure that we always get PAGE_SIZE-aligned | |
1197 | * and n*PAGE_SIZE-sized I/O requests. | |
1198 | */ | |
f1e3cfff | 1199 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
7b19b8d4 RJ |
1200 | blk_queue_logical_block_size(zram->disk->queue, |
1201 | ZRAM_LOGICAL_BLOCK_SIZE); | |
f1e3cfff NG |
1202 | blk_queue_io_min(zram->disk->queue, PAGE_SIZE); |
1203 | blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); | |
f4659d8e JK |
1204 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
1205 | zram->disk->queue->limits.max_discard_sectors = UINT_MAX; | |
1206 | /* | |
1207 | * zram_bio_discard() will clear all logical blocks if logical block | |
1208 | * size is identical to the physical block size (PAGE_SIZE). But if | |
1209 | * they differ, we skip discarding the parts of logical blocks that | |
1210 | * fall in portions of the request range not aligned to the physical | |
1211 | * block size. So we can't ensure that all discarded logical blocks | |
1212 | * are zeroed. | |
1213 | */ | |
1214 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) | |
1215 | zram->disk->queue->limits.discard_zeroes_data = 1; | |
1216 | else | |
1217 | zram->disk->queue->limits.discard_zeroes_data = 0; | |
1218 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); | |
5d83d5a0 | 1219 | |
f1e3cfff | 1220 | add_disk(zram->disk); |
306b0c95 | 1221 | |
33863c21 NG |
1222 | ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, |
1223 | &zram_disk_attr_group); | |
1224 | if (ret < 0) { | |
94b8435f | 1225 | pr_warn("Error creating sysfs group"); |
39a9b8ac | 1226 | goto out_free_disk; |
33863c21 | 1227 | } |
e46b8a03 | 1228 | strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); |
be2d1d56 | 1229 | zram->meta = NULL; |
beca3ec7 | 1230 | zram->max_comp_streams = 1; |
39a9b8ac | 1231 | return 0; |
de1a21a0 | 1232 | |
39a9b8ac JL |
1233 | out_free_disk: |
1234 | del_gendisk(zram->disk); | |
1235 | put_disk(zram->disk); | |
1236 | out_free_queue: | |
ee980160 | 1237 | blk_cleanup_queue(queue); |
de1a21a0 NG |
1238 | out: |
1239 | return ret; | |
306b0c95 NG |
1240 | } |
1241 | ||
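/*
 * Undo create_device() for the first @nr devices: remove the sysfs
 * group, reset each device, release its queue and gendisk, then free
 * the device array and the major number.
 */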
a096cafc | 1242 | static void destroy_devices(unsigned int nr) |
306b0c95 | 1243 | { |
a096cafc SS |
1244 | struct zram *zram; |
1245 | unsigned int i; | |
33863c21 | 1246 | |
a096cafc SS |
1247 | for (i = 0; i < nr; i++) { |
1248 | zram = &zram_devices[i]; | |
1249 | /* | |
1250 | * Remove sysfs first, so no one will perform a disksize | |
1251 | * store while we destroy the devices | |
1252 | */ | |
1253 | sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, | |
1254 | &zram_disk_attr_group); | |
306b0c95 | 1255 | |
a096cafc SS |
1256 | zram_reset_device(zram); |
1257 | ||
ee980160 | 1258 | blk_cleanup_queue(zram->disk->queue); |
a096cafc SS |
1259 | del_gendisk(zram->disk); |
1260 | put_disk(zram->disk); | |
a096cafc SS |
1261 | } |
1262 | ||
1263 | kfree(zram_devices); | |
1264 | unregister_blkdev(zram_major, "zram"); | |
1265 | pr_info("Destroyed %u device(s)\n", nr); | |
306b0c95 NG |
1266 | } |
1267 | ||
f1e3cfff | 1268 | static int __init zram_init(void) |
306b0c95 | 1269 | { |
de1a21a0 | 1270 | int ret, dev_id; |
306b0c95 | 1271 | |
5fa5a901 | 1272 | if (num_devices > max_num_devices) { |
94b8435f | 1273 | pr_warn("Invalid value for num_devices: %u\n", |
5fa5a901 | 1274 | num_devices); |
a096cafc | 1275 | return -EINVAL; |
306b0c95 NG |
1276 | } |
1277 | ||
f1e3cfff NG |
1278 | zram_major = register_blkdev(0, "zram"); |
1279 | if (zram_major <= 0) { | |
94b8435f | 1280 | pr_warn("Unable to get major number\n"); |
a096cafc | 1281 | return -EBUSY; |
306b0c95 NG |
1282 | } |
1283 | ||
306b0c95 | 1284 | /* Allocate the device array and initialize each one */ |
5fa5a901 | 1285 | zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL); |
43801f6e | 1286 | if (!zram_devices) { |
a096cafc SS |
1287 | unregister_blkdev(zram_major, "zram"); |
1288 | return -ENOMEM; | |
de1a21a0 | 1289 | } |
306b0c95 | 1290 | |
5fa5a901 | 1291 | for (dev_id = 0; dev_id < num_devices; dev_id++) { |
43801f6e | 1292 | ret = create_device(&zram_devices[dev_id], dev_id); |
de1a21a0 | 1293 | if (ret) |
a096cafc | 1294 | goto out_error; |
de1a21a0 NG |
1295 | } |
1296 | ||
a096cafc | 1297 | pr_info("Created %u device(s)\n", num_devices); |
306b0c95 | 1298 | return 0; |
de1a21a0 | 1299 | |
a096cafc SS |
1300 | out_error: |
1301 | destroy_devices(dev_id); | |
306b0c95 NG |
1302 | return ret; |
1303 | } | |
1304 | ||
f1e3cfff | 1305 | static void __exit zram_exit(void) |
306b0c95 | 1306 | { |
a096cafc | 1307 | destroy_devices(num_devices); |
306b0c95 NG |
1308 | } |
1309 | ||
f1e3cfff NG |
1310 | module_init(zram_init); |
1311 | module_exit(zram_exit); | |
306b0c95 | 1312 | |
9b3bb7ab SS |
1313 | module_param(num_devices, uint, 0); |
1314 | MODULE_PARM_DESC(num_devices, "Number of zram devices"); | |
1315 | ||
306b0c95 NG |
1316 | MODULE_LICENSE("Dual BSD/GPL"); |
1317 | MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>"); | |
f1e3cfff | 1318 | MODULE_DESCRIPTION("Compressed RAM Block Device"); |