/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

#ifdef CONFIG_ZSMALLOC_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include "zsmalloc.h"
#include "zsmalloc_int.h"

/*
 * A zspage's class index and fullness group
 * are encoded in its (first) page->mapping
 */
#define CLASS_IDX_BITS	28
#define FULLNESS_BITS	4
#define CLASS_IDX_MASK	((1 << CLASS_IDX_BITS) - 1)
#define FULLNESS_MASK	((1 << FULLNESS_BITS) - 1)
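
/*
 * Layout of the value stored in (first) page->mapping:
 *
 *	bits [FULLNESS_BITS - 1 .. 0]: fullness group
 *	bits [CLASS_IDX_BITS + FULLNESS_BITS - 1 .. FULLNESS_BITS]: class index
 *
 * i.e. the fullness group lives in the low 4 bits and the class index
 * in the next 28, so both fit in a single unsigned long.
 */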

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

static int is_first_page(struct page *page)
{
	return test_bit(PG_private, &page->flags);
}

static int is_last_page(struct page *page)
{
	return test_bit(PG_private_2, &page->flags);
}

static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
				enum fullness_group *fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = (unsigned long)page->mapping;
	*fullness = m & FULLNESS_MASK;
	*class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
}

static void set_zspage_mapping(struct page *page, unsigned int class_idx,
				enum fullness_group fullness)
{
	unsigned long m;
	BUG_ON(!is_first_page(page));

	m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
			(fullness & FULLNESS_MASK);
	page->mapping = (struct address_space *)m;
}

static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

	return idx;
}
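
/*
 * Worked example (the constants come from zsmalloc_int.h; the values
 * below are assumed for illustration): with ZS_MIN_ALLOC_SIZE = 32 and
 * ZS_SIZE_CLASS_DELTA = 16, a request for 100 bytes yields
 * idx = DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the class created with
 * size 32 + 5 * 16 = 112 bytes in zs_create_pool().
 */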

static enum fullness_group get_fullness_group(struct page *page)
{
	int inuse, max_objects;
	enum fullness_group fg;
	BUG_ON(!is_first_page(page));

	inuse = page->inuse;
	max_objects = page->objects;

	if (inuse == 0)
		fg = ZS_EMPTY;
	else if (inuse == max_objects)
		fg = ZS_FULL;
	else if (inuse <= max_objects / fullness_threshold_frac)
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}
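
/*
 * Example (fullness_threshold_frac is defined in zsmalloc_int.h; the
 * value 4 below is assumed for illustration): a zspage with
 * max_objects = 10 is ZS_ALMOST_EMPTY while inuse <= 10 / 4 = 2,
 * ZS_ALMOST_FULL for inuse = 3..9, ZS_EMPTY at 0 and ZS_FULL at 10.
 */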

static void insert_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	if (*head)
		list_add_tail(&page->lru, &(*head)->lru);

	*head = page;
}

static void remove_zspage(struct page *page, struct size_class *class,
				enum fullness_group fullness)
{
	struct page **head;

	BUG_ON(!is_first_page(page));

	if (fullness >= _ZS_NR_FULLNESS_GROUPS)
		return;

	head = &class->fullness_list[fullness];
	BUG_ON(!*head);
	if (list_empty(&(*head)->lru))
		*head = NULL;
	else if (*head == page)
		*head = (struct page *)list_entry((*head)->lru.next,
					struct page, lru);

	list_del_init(&page->lru);
}

static enum fullness_group fix_fullness_group(struct zs_pool *pool,
						struct page *page)
{
	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group currfg, newfg;

	BUG_ON(!is_first_page(page));

	get_zspage_mapping(page, &class_idx, &currfg);
	newfg = get_fullness_group(page);
	if (newfg == currfg)
		goto out;

	class = &pool->size_class[class_idx];
	remove_zspage(page, class, currfg);
	insert_zspage(page, class, newfg);
	set_zspage_mapping(page, class_idx, newfg);

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *	wastage = Zp % class_size
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for a size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_zspage_order(int class_size)
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}
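
/*
 * Worked example with PAGE_SIZE = 4096 and class_size = 1536
 * (i.e. 3/8 of a page, matching the comment above):
 *
 *	i = 1: waste =  4096 % 1536 = 1024, usedpc = 75
 *	i = 2: waste =  8192 % 1536 =  512, usedpc = 93
 *	i = 3: waste = 12288 % 1536 =    0, usedpc = 100
 *
 * so three linked pages hold exactly 8 such objects with no waste.
 */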

/*
 * A single 'zspage' is composed of many system pages which are
 * linked together using fields in struct page. This function finds
 * the first/head page, given any component page of a zspage.
 */
static struct page *get_first_page(struct page *page)
{
	if (is_first_page(page))
		return page;
	else
		return page->first_page;
}

static struct page *get_next_page(struct page *page)
{
	struct page *next;

	if (is_last_page(page))
		next = NULL;
	else if (is_first_page(page))
		next = (struct page *)page->private;
	else
		next = list_entry(page->lru.next, struct page, lru);

	return next;
}

/* Encode <page, obj_idx> as a single handle value */
static void *obj_location_to_handle(struct page *page, unsigned long obj_idx)
{
	unsigned long handle;

	if (!page) {
		BUG_ON(obj_idx);
		return NULL;
	}

	handle = page_to_pfn(page) << OBJ_INDEX_BITS;
	handle |= (obj_idx & OBJ_INDEX_MASK);

	return (void *)handle;
}

/* Decode <page, obj_idx> pair from the given object handle */
static void obj_handle_to_location(void *handle, struct page **page,
				unsigned long *obj_idx)
{
	unsigned long hval = (unsigned long)handle;

	*page = pfn_to_page(hval >> OBJ_INDEX_BITS);
	*obj_idx = hval & OBJ_INDEX_MASK;
}
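
/*
 * Example round trip (OBJ_INDEX_BITS and OBJ_INDEX_MASK come from
 * zsmalloc_int.h): for a page with pfn 0x1234 and obj_idx 5, the handle
 * is (0x1234 << OBJ_INDEX_BITS) | 5; obj_handle_to_location() recovers
 * both by shifting and masking the same value back apart.
 */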

static unsigned long obj_idx_to_offset(struct page *page,
				unsigned long obj_idx, int class_size)
{
	unsigned long off = 0;

	if (!is_first_page(page))
		off = page->index;

	return off + obj_idx * class_size;
}
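
/*
 * Example (PAGE_SIZE = 4096, class_size = 3072, values for
 * illustration): objects may straddle page boundaries, so the first
 * object that *starts* in a non-head page rarely starts at offset 0.
 * If an object beginning at offset 3072 of page 0 spills 2048 bytes
 * into page 1, init_zspage() sets page1->index = 2048, and
 * <page1, obj_idx 0> resolves to offset 2048.
 */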

static void free_zspage(struct page *first_page)
{
	struct page *nextp, *tmp;

	BUG_ON(!is_first_page(first_page));
	BUG_ON(first_page->inuse);

	nextp = (struct page *)page_private(first_page);

	clear_bit(PG_private, &first_page->flags);
	clear_bit(PG_private_2, &first_page->flags);
	set_page_private(first_page, 0);
	first_page->mapping = NULL;
	first_page->freelist = NULL;
	reset_page_mapcount(first_page);
	__free_page(first_page);

	/* zspage with only 1 system page */
	if (!nextp)
		return;

	list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
		list_del(&nextp->lru);
		clear_bit(PG_private_2, &nextp->flags);
		nextp->index = 0;
		__free_page(nextp);
	}
}

/* Initialize a newly allocated zspage */
static void init_zspage(struct page *first_page, struct size_class *class)
{
	unsigned long off = 0;
	struct page *page = first_page;

	BUG_ON(!is_first_page(first_page));
	while (page) {
		struct page *next_page;
		struct link_free *link;
		unsigned int i, objs_on_page;

		/*
		 * page->index stores offset of first object starting
		 * in the page. For the first page, this is always 0,
		 * so we use first_page->index (aka ->freelist) to store
		 * head of corresponding zspage's freelist.
		 */
		if (page != first_page)
			page->index = off;

		link = (struct link_free *)kmap_atomic(page) +
						off / sizeof(*link);
		objs_on_page = (PAGE_SIZE - off) / class->size;

		for (i = 1; i <= objs_on_page; i++) {
			off += class->size;
			if (off < PAGE_SIZE) {
				link->next = obj_location_to_handle(page, i);
				link += class->size / sizeof(*link);
			}
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
		link->next = obj_location_to_handle(next_page, 0);
		kunmap_atomic(link);
		page = next_page;
		off = (off + class->size) % PAGE_SIZE;
	}
}
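
/*
 * Resulting freelist for a two-page zspage with class_size = 3072
 * (PAGE_SIZE = 4096, values for illustration): the object at page 0
 * offset 0 links to <page0, idx 1> (offset 3072), which links to
 * <page1, idx 0> (offset 2048); the last link on the last page is set
 * to the NULL handle produced by obj_location_to_handle(NULL, 0),
 * terminating the chain.
 */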

/*
 * Allocate a zspage for the given size class
 */
static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
{
	int i, error;
	struct page *first_page = NULL, *prev_page = NULL;

	/*
	 * Allocate individual pages and link them together as:
	 * 1. first page->private = first sub-page
	 * 2. all sub-pages are linked together using page->lru
	 * 3. each sub-page is linked to the first page using page->first_page
	 *
	 * For each size class, First/Head pages are linked together using
	 * page->lru. Also, we set PG_private to identify the first page
	 * (i.e. no other sub-page has this flag set) and PG_private_2 to
	 * identify the last page.
	 */
	error = -ENOMEM;
	for (i = 0; i < class->zspage_order; i++) {
		struct page *page;

		page = alloc_page(flags);
		if (!page)
			goto cleanup;

		INIT_LIST_HEAD(&page->lru);
		if (i == 0) {	/* first page */
			set_bit(PG_private, &page->flags);
			set_page_private(page, 0);
			first_page = page;
			first_page->inuse = 0;
		}
		if (i == 1)
			first_page->private = (unsigned long)page;
		if (i >= 1)
			page->first_page = first_page;
		if (i >= 2)
			list_add(&page->lru, &prev_page->lru);
		if (i == class->zspage_order - 1)	/* last page */
			set_bit(PG_private_2, &page->flags);

		prev_page = page;
	}

	init_zspage(first_page, class);

	first_page->freelist = obj_location_to_handle(first_page, 0);
	/* Maximum number of objects we can store in this zspage */
	first_page->objects = class->zspage_order * PAGE_SIZE / class->size;

	error = 0;	/* Success */

cleanup:
	if (unlikely(error) && first_page) {
		free_zspage(first_page);
		first_page = NULL;
	}

	return first_page;
}
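
/*
 * Link structure built above for a 3-page zspage:
 *
 *	first_page (PG_private): ->private = page1
 *	page1: ->first_page = first_page, ->lru linked with page2
 *	page2 (PG_private_2): ->first_page = first_page
 *
 * so get_next_page() can walk first_page -> page1 -> page2 and stop at
 * the PG_private_2 marker.
 */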

static struct page *find_get_zspage(struct size_class *class)
{
	int i;
	struct page *page;

	for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
		page = class->fullness_list[i];
		if (page)
			break;
	}

	return page;
}

/*
 * If this becomes a separate module, register zs_init() with
 * module_init(), zs_exit() with module_exit(), and remove zs_initialized
 */
static int zs_initialized;

static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
				void *pcpu)
{
	int cpu = (long)pcpu;
	struct mapping_area *area;

	switch (action) {
	case CPU_UP_PREPARE:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			break;
		area->vm = alloc_vm_area(2 * PAGE_SIZE, area->vm_ptes);
		if (!area->vm)
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_DEAD:
	case CPU_UP_CANCELED:
		area = &per_cpu(zs_map_area, cpu);
		if (area->vm)
			free_vm_area(area->vm);
		area->vm = NULL;
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block zs_cpu_nb = {
	.notifier_call = zs_cpu_notifier
};

static void zs_exit(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
	unregister_cpu_notifier(&zs_cpu_nb);
}

static int zs_init(void)
{
	int cpu, ret;

	register_cpu_notifier(&zs_cpu_nb);
	for_each_online_cpu(cpu) {
		ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
		if (notifier_to_errno(ret))
			goto fail;
	}
	return 0;
fail:
	zs_exit();
	return notifier_to_errno(ret);
}

struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
{
	int i, error, ovhd_size;
	struct zs_pool *pool;

	if (!name)
		return NULL;

	ovhd_size = roundup(sizeof(*pool), PAGE_SIZE);
	pool = kzalloc(ovhd_size, GFP_KERNEL);
	if (!pool)
		return NULL;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int size;
		struct size_class *class;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;

		class = &pool->size_class[i];
		class->size = size;
		class->index = i;
		spin_lock_init(&class->lock);
		class->zspage_order = get_zspage_order(size);
	}

	/*
	 * If this becomes a separate module, register zs_init with
	 * module_init, and remove this block
	 */
	if (!zs_initialized) {
		error = zs_init();
		if (error)
			goto cleanup;
		zs_initialized = 1;
	}

	pool->flags = flags;
	pool->name = name;

	error = 0;	/* Success */

cleanup:
	if (error) {
		zs_destroy_pool(pool);
		pool = NULL;
	}

	return pool;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = &pool->size_class[i];

		for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
			if (class->fullness_list[fg]) {
				pr_info("Freeing non-empty class with size "
					"%db, fullness group %d\n",
					class->size, fg);
			}
		}
	}
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
 *
 * On success, returns an opaque handle identifying the allocated block,
 * which can be passed to zs_map_object() to obtain a usable pointer and
 * to zs_free() to release the block. On failure, returns NULL.
 *
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
void *zs_malloc(struct zs_pool *pool, size_t size)
{
	void *obj;
	struct link_free *link;
	int class_idx;
	struct size_class *class;

	struct page *first_page, *m_page;
	unsigned long m_objidx, m_offset;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return NULL;

	class_idx = get_size_class_index(size);
	class = &pool->size_class[class_idx];
	BUG_ON(class_idx != class->index);

	spin_lock(&class->lock);
	first_page = find_get_zspage(class);

	if (!first_page) {
		spin_unlock(&class->lock);
		first_page = alloc_zspage(class, pool->flags);
		if (unlikely(!first_page))
			return NULL;

		set_zspage_mapping(first_page, class->index, ZS_EMPTY);
		spin_lock(&class->lock);
		class->pages_allocated += class->zspage_order;
	}

	obj = first_page->freelist;
	obj_handle_to_location(obj, &m_page, &m_objidx);
	m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);

	link = (struct link_free *)kmap_atomic(m_page) +
					m_offset / sizeof(*link);
	first_page->freelist = link->next;
	memset(link, POISON_INUSE, sizeof(*link));
	kunmap_atomic(link);

	first_page->inuse++;
	/* Now move the zspage to another fullness group, if required */
	fix_fullness_group(pool, first_page);
	spin_unlock(&class->lock);

	return obj;
}
EXPORT_SYMBOL_GPL(zs_malloc);

void zs_free(struct zs_pool *pool, void *obj)
{
	struct link_free *link;
	struct page *first_page, *f_page;
	unsigned long f_objidx, f_offset;

	unsigned int class_idx;
	struct size_class *class;
	enum fullness_group fullness;

	if (unlikely(!obj))
		return;

	obj_handle_to_location(obj, &f_page, &f_objidx);
	first_page = get_first_page(f_page);

	get_zspage_mapping(first_page, &class_idx, &fullness);
	class = &pool->size_class[class_idx];
	f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);

	spin_lock(&class->lock);

	/* Insert this object in containing zspage's freelist */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
							+ f_offset);
	link->next = first_page->freelist;
	kunmap_atomic(link);
	first_page->freelist = obj;

	first_page->inuse--;
	fullness = fix_fullness_group(pool, first_page);

	if (fullness == ZS_EMPTY)
		class->pages_allocated -= class->zspage_order;

	spin_unlock(&class->lock);

	if (fullness == ZS_EMPTY)
		free_zspage(first_page);
}
EXPORT_SYMBOL_GPL(zs_free);

void *zs_map_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
	} else {
		/* this object spans two pages */
		struct page *nextp;

		nextp = get_next_page(page);
		BUG_ON(!nextp);

		set_pte(area->vm_ptes[0], mk_pte(page, PAGE_KERNEL));
		set_pte(area->vm_ptes[1], mk_pte(nextp, PAGE_KERNEL));

		/* We pre-allocated VM area so mapping can never fail */
		area->vm_addr = area->vm->addr;
	}

	return area->vm_addr + off;
}
EXPORT_SYMBOL_GPL(zs_map_object);

void zs_unmap_object(struct zs_pool *pool, void *handle)
{
	struct page *page;
	unsigned long obj_idx, off;

	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;

	BUG_ON(!handle);

	obj_handle_to_location(handle, &page, &obj_idx);
	get_zspage_mapping(get_first_page(page), &class_idx, &fg);
	class = &pool->size_class[class_idx];
	off = obj_idx_to_offset(page, obj_idx, class->size);

	area = &__get_cpu_var(zs_map_area);
	if (off + class->size <= PAGE_SIZE) {
		kunmap_atomic(area->vm_addr);
	} else {
		set_pte(area->vm_ptes[0], __pte(0));
		set_pte(area->vm_ptes[1], __pte(0));
		__flush_tlb_one((unsigned long)area->vm_addr);
		__flush_tlb_one((unsigned long)area->vm_addr + PAGE_SIZE);
	}
	put_cpu_var(zs_map_area);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
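
/*
 * Typical use of the map/unmap pair (a sketch, not from the original
 * source): zs_map_object() takes the per-cpu mapping area via
 * get_cpu_var(), which disables preemption until zs_unmap_object()
 * calls put_cpu_var(), so the caller must not sleep in between:
 *
 *	void *handle = zs_malloc(pool, len);
 *	if (handle) {
 *		void *dst = zs_map_object(pool, handle);
 *		memcpy(dst, src, len);
 *		zs_unmap_object(pool, handle);
 *	}
 */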

u64 zs_get_total_size_bytes(struct zs_pool *pool)
{
	int i;
	u64 npages = 0;

	for (i = 0; i < ZS_SIZE_CLASSES; i++)
		npages += pool->size_class[i].pages_allocated;

	return npages << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);