/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *	page->first_page: points to the first component (0-order) page
 *	page->index (union with page->freelist): offset of the first object
 *		starting in this page. For the first page, this is
 *		always 0, so we use this field (aka freelist) to point
 *		to the first free object in zspage.
 *	page->lru: links together all component pages (except the first page)
 *		of a zspage
 *
 * For _first_ page only:
 *
 *	page->private (union with page->first_page): refers to the
 *		component page after the first page.
 *		If the page is the first page of a zspage holding a huge
 *		object, it stores that object's handle instead; see
 *		size_class->huge.
 *	page->freelist: points to the first free object in zspage.
 *		Free objects are linked together using in-place
 *		metadata.
 *	page->objects: maximum number of objects we can store in this
 *		zspage (class->zspage_order * PAGE_SIZE / class->size)
 *	page->lru: links together first pages of various zspages.
 *		Basically forming list of zspages in a fullness group.
 *	page->mapping: class index and fullness group of the zspage
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
 *	PG_private2: identifies the last component page
 *
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>

/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page, which avoids the complex case of mapping 2 pages
 * simply to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> is relative to system
 * page <PFN> it is stored in, so for each sub-page belonging
 * to a zspage, obj_idx starts with 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT - OBJ_TAG_BITS
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle stores the object position by
 * encoding <page, obj_idx>, and the encoded value has room in its
 * least significant bit (see obj_to_location()).
 * We use that bit to synchronize object access between the user
 * and migration.
 */
#define HANDLE_PIN_BIT	0

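/*
 * Illustrative sketch (not part of the original source): the handle is a
 * kmem_cache allocation whose slot holds the encoded object value, so
 * pinning simply takes bit 0 of that stored value as a lock bit, e.g.:
 *
 *	unsigned long *ptr = (unsigned long *)handle;
 *	test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);	/1 pin, see pin_tag() 1/
 *	clear_bit_unlock(HANDLE_PIN_BIT, ptr);		/1 unpin 1/
 *
 * (the "/1 ... 1/" markers stand in for nested comment delimiters)
 */
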
/*
 * The head of an allocated object stores OBJ_ALLOCATED_TAG so we can
 * tell whether an object is allocated. It's okay to keep this status
 * bit in the least significant bit because the header holds a handle,
 * which is an address aligned to at least 4 bytes, so we have room
 * for at least two bits.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

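/*
 * Worked example (illustrative; assumes a 64-bit kernel with 4K pages
 * and the MAX_PHYSMEM_BITS == BITS_PER_LONG default above):
 * _PFN_BITS = 64 - 12 = 52, so OBJ_INDEX_BITS = 64 - 52 - 1 = 11 and an
 * encoded value looks like this, from most to least significant bit:
 *
 *	<PFN : 52 bits> <obj_idx : 11 bits> <tag : 1 bit>
 *
 * See location_to_obj()/obj_to_location() for the actual shift/mask
 * arithmetic.
 */
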
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep the handle */
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

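/*
 * Worked numbers (illustrative; same 64-bit/4K assumptions as above):
 * ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT is 4 * 4096 = 16384, and
 * 16384 >> OBJ_INDEX_BITS(11) = 8, so ZS_MIN_ALLOC_SIZE = MAX(32, 8) = 32.
 * On a config with fewer index bits, say 32-bit PAE where
 * OBJ_INDEX_BITS = 32 - 24 - 1 = 7, the shift yields 16384 >> 7 = 128,
 * which guarantees obj_idx always fits in OBJ_INDEX_BITS.
 */
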
/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages
 *    are spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 * (reason above)
 */
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> 8)

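/*
 * Sanity check of that count (illustrative): with 4K pages the delta is
 * 4096 >> 8 = 16 bytes, so the number of classes is
 * (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / 16 + 1 = (4096 - 32) / 16 + 1
 * = 255, matching what init_zs_size_classes() computes below.
 */
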
/*
 * We do not maintain any list for completely empty or full zspages
 */
enum fullness_group {
	ZS_ALMOST_FULL,
	ZS_ALMOST_EMPTY,
	_ZS_NR_FULLNESS_GROUPS,

	ZS_EMPTY,
	ZS_FULL
};

enum zs_stat_type {
	OBJ_ALLOCATED,
	OBJ_USED,
	CLASS_ALMOST_FULL,
	CLASS_ALMOST_EMPTY,
	NR_ZS_STAT_TYPE,
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#endif

/*
 * number of size_classes
 */
static int zs_size_classes;

/*
 * We assign a zspage to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N * 3 / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N * 3 / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	unsigned int index;

	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
	/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
	bool huge;

#ifdef CONFIG_ZSMALLOC_STAT
	struct zs_size_stat stats;
#endif

	spinlock_t lock;

	struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
};

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, first_page->freelist gives head of this list.
 *
 * This must be a power of 2 and less than or equal to ZS_ALIGN.
 */
struct link_free {
	union {
		/*
		 * Position of next free chunk (encodes <PFN, obj_idx>).
		 * Valid only for unallocated objects.
		 */
		void *next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;

	gfp_t flags;	/* allocation flags used when growing pool */
	atomic_long_t pages_allocated;

#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)

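/*
 * Worked example (illustrative): for class index 5 and fullness
 * ZS_ALMOST_EMPTY (== 1), set_zspage_mapping() below stores
 * (5 << FULLNESS_BITS) | 1 = 0x51 in first_page->mapping, and
 * get_zspage_mapping() unpacks it again with the masks above.
 */
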
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping objects that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
	bool huge;
};

static int create_handle_cache(struct zs_pool *pool)
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
	return pool->handle_cachep ? 0 : 1;
}

static void destroy_handle_cache(struct zs_pool *pool)
{
	kmem_cache_destroy(pool->handle_cachep);
}

static unsigned long alloc_handle(struct zs_pool *pool)
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
		pool->flags & ~__GFP_HIGHMEM);
}

static void free_handle(struct zs_pool *pool, unsigned long handle)
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

/* zpool driver */

#ifdef CONFIG_ZPOOL

static void *zs_zpool_create(char *name, gfp_t gfp, struct zpool_ops *zpool_ops)
{
	return zs_create_pool(name, gfp);
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size);
	return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}
static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
	return zs_get_total_pages(pool) << PAGE_SHIFT;
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
{
	return pages_per_zspage * PAGE_SIZE / size;
}

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return PagePrivate(page);
}

static int is_last_page(struct page *page)
{
	return PagePrivate2(page);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return min(zs_size_classes - 1, idx);
}

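/*
 * Worked example (illustrative, 4K pages): for size == 400,
 * idx = DIV_ROUND_UP(400 - 32, 16) = 23, i.e. the class whose chunk
 * size is 32 + 23 * 16 = 400 bytes; a 401-byte request rounds up to
 * the 416-byte class.
 */
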
#ifdef CONFIG_ZSMALLOC_STAT

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] += cnt;
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
	class->stats.objs[type] -= cnt;
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return class->stats.objs[type];
}

static int __init zs_stat_init(void)
{
	if (!debugfs_initialized())
		return -ENODEV;

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
		return -ENOMEM;

	return 0;
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
	unsigned long obj_allocated, obj_used, pages_used;
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;

	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
			"pages_per_zspage");

	for (i = 0; i < zs_size_classes; i++) {
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
		spin_unlock(&class->lock);

		objs_per_zspage = get_maxobj_per_zspage(class->size,
				class->pages_per_zspage);
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

		seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
			class->pages_per_zspage);

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
	}

	seq_puts(s, "\n");
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
			total_used_objs, total_pages);

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	struct dentry *entry;

	if (!zs_stat_root)
		return -ENODEV;

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
		return -ENOMEM;
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
		return -ENOMEM;
	}

	return 0;
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */

static inline void zs_stat_inc(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline void zs_stat_dec(struct size_class *class,
				enum zs_stat_type type, unsigned long cnt)
{
}

static inline unsigned long zs_stat_get(struct size_class *class,
				enum zs_stat_type type)
{
	return 0;
}

static int __init zs_stat_init(void)
{
	return 0;
}

static void __exit zs_stat_exit(void)
{
}

static inline int zs_pool_stat_create(char *name, struct zs_pool *pool)
{
	return 0;
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}

#endif


/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns the fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= 3 * max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
	zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
	zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
			CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
}

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class,
						struct page *page)
{
	unsigned int class_idx;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

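/*
 * Worked example (illustrative, 4K pages): for class_size = 1536
 * (3/8 * PAGE_SIZE), one page wastes 4096 % 1536 = 1024 bytes (75%
 * used), two pages waste 8192 % 1536 = 512 (93% used), while three
 * pages waste 12288 % 1536 = 0 (100% used), so this returns 3.
 */
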
/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page_private(page);
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/*
 * Encode <page, obj_idx> as a single handle value.
 * We use the least significant bit of the handle for tagging.
 */
static void *location_to_obj(struct page *page, unsigned long obj_idx)
{
	unsigned long obj;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
	obj |= ((obj_idx) & OBJ_INDEX_MASK);
	obj <<= OBJ_TAG_BITS;

	return (void *)obj;
}

/*
 * Decode <page, obj_idx> pair from the given object handle by undoing
 * the shift and mask arithmetic applied in location_to_obj().
 */
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned long *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

static unsigned long obj_to_head(struct size_class *class, struct page *page,
			void *obj)
{
	if (class->huge) {
		VM_BUG_ON(!is_first_page(page));
		return *(unsigned long *)page_private(page);
	} else
		return *(unsigned long *)obj;
}

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}

static inline int trypin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
}

static void pin_tag(unsigned long handle)
{
	while (!trypin_tag(handle));
}

static void unpin_tag(unsigned long handle)
{
	unsigned long *ptr = (unsigned long *)handle;

	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
}

static void reset_page(struct page *page)
{
	clear_bit(PG_private, &page->flags);
	clear_bit(PG_private_2, &page->flags);
	set_page_private(page, 0);
	page->mapping = NULL;
	page->freelist = NULL;
	page_mapcount_reset(page);
}

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp, *head_extra;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	head_extra = (struct page *)page_private(first_page);

	reset_page(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!head_extra)
		return;

	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
		list_del(&nextp->lru);
		reset_page(nextp);
		__free_page(nextp);
	}
	reset_page(head_extra);
	__free_page(head_extra);
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i = 1;
		void *vaddr;

		/*
		 * page->index stores offset of the first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * the head of the corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);

		while ((off += class->size) < PAGE_SIZE) {
			link->next = location_to_obj(page, i++);
			link += class->size / sizeof(*link);
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present).
		 */
		next_page = get_next_page(page);
		link->next = location_to_obj(next_page, 0);
		kunmap_atomic(vaddr);
		page = next_page;
		off %= PAGE_SIZE;
	}
}

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *uninitialized_var(prev_page);

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			SetPagePrivate(page);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			set_page_private(first_page, (unsigned long)page);
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->pages_per_zspage - 1)	/* last page */
			SetPagePrivate2(page);
		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = location_to_obj(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;

	error = 0; /* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}

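/*
 * Illustrative layout for a 3-page zspage built above (not part of the
 * original source):
 *
 *	first_page: PG_private set, ->private points to page1,
 *	            ->freelist heads the free-object list
 *	page1:      ->first_page = first_page, chained to page2 via ->lru
 *	page2:      ->first_page = first_page, PG_private_2 set (last page)
 */
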
static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

#ifdef CONFIG_PGTABLE_MAPPING
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

	unmap_kernel_range(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_PGTABLE_MAPPING */

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	kfree(area->vm_buf);
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf = area->vm_buf;

	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
out:
	return area->vm_buf;
}

static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
{
	int sizes[2];
	void *addr;
	char *buf;

	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;

	buf = area->vm_buf;
	if (!area->huge) {
		buf = buf + ZS_HANDLE_SIZE;
		size -= ZS_HANDLE_SIZE;
		off += ZS_HANDLE_SIZE;
	}

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
}

#endif /* CONFIG_PGTABLE_MAPPING */

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int ret, cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		ret = __zs_cpu_up(area);
		if (ret)
			return notifier_from_errno(ret);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		__zs_cpu_down(area);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static int zs_register_cpu_notifier(void)
{
	int cpu, uninitialized_var(ret);

	cpu_notifier_register_begin();

	__register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			break;
	}

	cpu_notifier_register_done();
	return notifier_to_errno(ret);
}

static void zs_unregister_cpu_notifier(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	__unregister_cpu_notifier(&zs_cpu_nb);

	cpu_notifier_register_done();
}

static void init_zs_size_classes(void)
{
	int nr;

	nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
	if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
		nr += 1;

	zs_size_classes = nr;
}

static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
{
	if (prev->pages_per_zspage != pages_per_zspage)
		return false;

	if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
		!= get_maxobj_per_zspage(size, pages_per_zspage))
		return false;

	return true;
}

static bool zspage_full(struct page *page)
{
	BUG_ON(!is_first_page(page));

	return page->inuse == page->objects;
}

unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

/**
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
 *
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
 *
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
	void *ret;

	BUG_ON(!handle);

	/*
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another user's mappings.
	 */
	BUG_ON(in_interrupt());

	/* From now on, migration cannot move the object */
	pin_tag(handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
		ret = area->vm_addr + off;
		goto out;
	}

	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);

	ret = __zs_map_object(area, pages, off, class->size);
out:
	if (!class->huge)
		ret += ZS_HANDLE_SIZE;

	return ret;
}
EXPORT_SYMBOL_GPL(zs_map_object);
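
/*
 * Typical caller pattern (illustrative sketch, not from the original
 * source):
 *
 *	unsigned long handle = zs_malloc(pool, len);
 *	void *dst = zs_map_object(pool, handle, ZS_MM_WO);
 *	memcpy(dst, src, len);
 *	zs_unmap_object(pool, handle);
 *
 * The caller must not sleep between map and unmap: preemption and page
 * faults are disabled while the mapping is held.
 */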

void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
	struct page *page;
	unsigned long obj, obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];

		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
	unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);

static unsigned long obj_malloc(struct page *first_page,
		struct size_class *class, unsigned long handle)
{
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
	unsigned long m_objidx, m_offset;
	void *vaddr;

	handle |= OBJ_ALLOCATED_TAG;
	obj = (unsigned long)first_page->freelist;
	obj_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
	first_page->freelist = link->next;
	if (!class->huge)
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
		/* record handle in first_page->private */
		set_page_private(first_page, handle);
	kunmap_atomic(vaddr);
	first_page->inuse++;
	zs_stat_inc(class, OBJ_USED, 1);

	return obj;
}

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, a handle to the allocated object is returned,
 * otherwise 0.
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size)
{
	unsigned long handle, obj;
	struct size_class *class;
	struct page *first_page;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;

	handle = alloc_handle(pool);
	if (!handle)
		return 0;

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
	class = pool->size_class[get_size_class_index(size)];
	/* In huge class size, we store the handle into first_page->private */
	if (class->huge) {
		size -= ZS_HANDLE_SIZE;
		class = pool->size_class[get_size_class_index(size)];
	}

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page)) {
			free_handle(pool, handle);
			return 0;
		}

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		atomic_long_add(class->pages_per_zspage,
					&pool->pages_allocated);

		spin_lock(&class->lock);
		zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
	}

	obj = obj_malloc(first_page, class, handle);
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(class, first_page);
	record_obj(handle, obj);
	spin_unlock(&class->lock);

	return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);

static void obj_free(struct zs_pool *pool, struct size_class *class,
			unsigned long obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;
	void *vaddr;
	unsigned int class_idx;
	enum fullness_group fullness;

	BUG_ON(!obj);

	obj &= ~OBJ_ALLOCATED_TAG;
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	vaddr = kmap_atomic(f_page);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)(vaddr + f_offset);
	link->next = first_page->freelist;
	if (class->huge)
		set_page_private(first_page, 0);
	kunmap_atomic(vaddr);
	first_page->freelist = (void *)obj;
	first_page->inuse--;
	zs_stat_dec(class, OBJ_USED, 1);
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
	struct page *first_page, *f_page;
	unsigned long obj, f_objidx;
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!handle))
		return;

	pin_tag(handle);
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	obj_free(pool, class, obj);
	fullness = fix_fullness_group(class, first_page);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		free_zspage(first_page);
	}
	spin_unlock(&class->lock);
	unpin_tag(handle);

	free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);

static void zs_object_copy(unsigned long src, unsigned long dst,
				struct size_class *class)
{
	struct page *s_page, *d_page;
	unsigned long s_objidx, d_objidx;
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

	s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
	d_off = obj_idx_to_offset(d_page, d_objidx, class->size);

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

		if (s_off + size >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			BUG_ON(!s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		} else {
			s_off += size;
			s_size -= size;
		}

		if (d_off + size >= PAGE_SIZE) {
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			BUG_ON(!d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		} else {
			d_off += size;
			d_size -= size;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

1570 | /* | |
1571 | * Find alloced object in zspage from index object and | |
1572 | * return handle. | |
1573 | */ | |
1574 | static unsigned long find_alloced_obj(struct page *page, int index, | |
1575 | struct size_class *class) | |
1576 | { | |
1577 | unsigned long head; | |
1578 | int offset = 0; | |
1579 | unsigned long handle = 0; | |
1580 | void *addr = kmap_atomic(page); | |
1581 | ||
1582 | if (!is_first_page(page)) | |
1583 | offset = page->index; | |
1584 | offset += class->size * index; | |
1585 | ||
1586 | while (offset < PAGE_SIZE) { | |
7b60a685 | 1587 | head = obj_to_head(class, page, addr + offset); |
312fcae2 MK |
1588 | if (head & OBJ_ALLOCATED_TAG) { |
1589 | handle = head & ~OBJ_ALLOCATED_TAG; | |
1590 | if (trypin_tag(handle)) | |
1591 | break; | |
1592 | handle = 0; | |
1593 | } | |
1594 | ||
1595 | offset += class->size; | |
1596 | index++; | |
1597 | } | |
1598 | ||
1599 | kunmap_atomic(addr); | |
1600 | return handle; | |
1601 | } | |
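
/*
 * Example (illustrative numbers): for an allocated object, the word read
 * by obj_to_head() has OBJ_ALLOCATED_TAG (bit 0) set. If that word were
 * 0xffff880012345679, the stored handle would be
 * 0xffff880012345679 & ~OBJ_ALLOCATED_TAG == 0xffff880012345678.
 */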

struct zs_compact_control {
	/* Source page for migration, which may be a subpage of a zspage. */
	struct page *s_page;
	/* Destination page for migration, which must be the first page
	 * of a zspage. */
	struct page *d_page;
	/* Starting object index within @s_page from which to search for
	 * live objects. */
	int index;
	/* Number of objects migrated so far. */
	int nr_migrated;
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	unsigned long index = cc->index;
	int nr_migrated = 0;
	int ret = 0;

	while (1) {
		handle = find_alloced_obj(s_page, index, class);
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
			index = 0;
			continue;
		}

		/* Stop if there is no more space */
		if (zspage_full(d_page)) {
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(d_page, class, handle);
		zs_object_copy(used_obj, free_obj, class);
		index++;
		record_obj(handle, free_obj);
		unpin_tag(handle);
		obj_free(pool, class, used_obj);
		nr_migrated++;
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->index = index;
	cc->nr_migrated = nr_migrated;

	return ret;
}

static struct page *alloc_target_page(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page) {
			remove_zspage(page, class, i);
			break;
		}
	}

	return page;
}

static void putback_zspage(struct zs_pool *pool, struct size_class *class,
				struct page *first_page)
{
	int class_idx;
	enum fullness_group fullness;

	BUG_ON(!is_first_page(first_page));

	get_zspage_mapping(first_page, &class_idx, &fullness);
	insert_zspage(first_page, class, fullness);
	fullness = fix_fullness_group(class, first_page);
	if (fullness == ZS_EMPTY) {
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);

		free_zspage(first_page);
	}
}

static struct page *isolate_source_page(struct size_class *class)
{
	struct page *page;

	page = class->fullness_list[ZS_ALMOST_EMPTY];
	if (page)
		remove_zspage(page, class, ZS_ALMOST_EMPTY);

	return page;
}

static unsigned long __zs_compact(struct zs_pool *pool,
				struct size_class *class)
{
	int nr_to_migrate;
	struct zs_compact_control cc;
	struct page *src_page;
	struct page *dst_page = NULL;
	unsigned long nr_total_migrated = 0;

	cond_resched();

	spin_lock(&class->lock);
	while ((src_page = isolate_source_page(class))) {

		BUG_ON(!is_first_page(src_page));

		/* The goal is to migrate all live objects in the source page */
		nr_to_migrate = src_page->inuse;
		cc.index = 0;
		cc.s_page = src_page;

		while ((dst_page = alloc_target_page(class))) {
			cc.d_page = dst_page;
			/*
			 * If there is no more space in dst_page, try to
			 * allocate another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(pool, class, dst_page);
			nr_total_migrated += cc.nr_migrated;
			nr_to_migrate -= cc.nr_migrated;
		}

		/* Stop if we couldn't find a slot */
		if (dst_page == NULL)
			break;

		putback_zspage(pool, class, dst_page);
		putback_zspage(pool, class, src_page);
		spin_unlock(&class->lock);
		nr_total_migrated += cc.nr_migrated;
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_page)
		putback_zspage(pool, class, src_page);

	spin_unlock(&class->lock);

	return nr_total_migrated;
}

unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	unsigned long nr_migrated = 0;
	struct size_class *class;

	for (i = zs_size_classes - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		nr_migrated += __zs_compact(pool, class);
	}

	synchronize_rcu();

	return nr_migrated;
}
EXPORT_SYMBOL_GPL(zs_compact);
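
/*
 * Example (illustrative sketch; my_compact_store() is hypothetical and not
 * part of this file): a pool user such as zram can expose compaction as a
 * one-shot trigger, e.g. from a sysfs store handler.
 *
 *	static void my_compact_store(struct zs_pool *pool)
 *	{
 *		unsigned long nr_migrated;
 *
 *		nr_migrated = zs_compact(pool);
 *		pr_info("zsmalloc: %lu objects migrated\n", nr_migrated);
 *	}
 */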

/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: name of the pool, also used for the pool's statistics entry
 * @flags: GFP flags used when allocating zspages and handles from the pool
 *
 * This function must be called before anything else when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(char *name, gfp_t flags)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
			GFP_KERNEL);
	if (!pool->size_class) {
		kfree(pool);
		return NULL;
	}

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_handle_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of a size_class that we want
	 * to merge into must be greater than or equal to the current size.
	 */
	for (i = zs_size_classes - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);

		/*
		 * A size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that
		 * we can get better memory utilization if we use one
		 * size_class for many different sizes whose size_classes
		 * share the same characteristics. So we make a size_class
		 * point to the previous size_class when possible.
		 */
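		/*
		 * Worked example (assuming 4 KiB pages; the numbers are
		 * illustrative): sizes 4080 and 4096 both need
		 * pages_per_zspage == 1 and both fit exactly one object per
		 * zspage, so can_merge() lets them share one size_class.
		 */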
		if (prev_class) {
			if (can_merge(prev_class, size, pages_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		if (pages_per_zspage == 1 &&
			get_maxobj_per_zspage(size, pages_per_zspage) == 1)
			class->huge = true;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;

		prev_class = class;
	}

	pool->flags = flags;

	if (zs_pool_stat_create(name, pool))
		goto err;

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);
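
/*
 * Example (illustrative sketch; names and sizes are hypothetical): the
 * typical lifecycle of a pool and one object.
 *
 *	struct zs_pool *pool = zs_create_pool("example", GFP_KERNEL);
 *	unsigned long handle;
 *	void *dst;
 *
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	handle = zs_malloc(pool, 128);
 *	if (handle) {
 *		dst = zs_map_object(pool, handle, ZS_MM_WO);
 *		memset(dst, 0, 128);
 *		zs_unmap_object(pool, handle);
 *		zs_free(pool, handle);
 *	}
 *	zs_destroy_pool(pool);
 */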

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_pool_stat_destroy(pool);

	for (i = 0; i < zs_size_classes; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_handle_cache(pool);
	kfree(pool->size_class);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret = zs_register_cpu_notifier();

	if (ret)
		goto notifier_fail;

	init_zs_size_classes();

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	ret = zs_stat_init();
	if (ret) {
		pr_err("zs stat initialization failed\n");
		goto stat_fail;
	}
	return 0;

stat_fail:
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
notifier_fail:
	zs_unregister_cpu_notifier();

	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zs_unregister_cpu_notifier();

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");