/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
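/*
 * Page layout sketch (illustrative, assuming 64-byte chunks and a
 * one-chunk header; see the chunk math below):
 *
 *	+--------+---------------+------------------+----------------+
 *	| header | FIRST buddy   |     MIDDLE buddy |     LAST buddy |
 *	+--------+---------------+------------------+----------------+
 *	 chunk 0  grows upward    at start_middle    grows downward
 *	                                             from the page end
 *
 * A page whose three slots are all in use is fully "3-folded"; a page
 * with a free slot is kept on the unbuddied list corresponding to the
 * size of its largest free region.
 */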
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
/*
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *			each z3fold page, except for HEADLESS pages
 * @buddy: links the z3fold page into the relevant list in the pool
 * @page_lock: per-page lock
 * @refcount: reference count for the z3fold page
 * @first_chunks: the size of the first buddy in chunks, 0 if free
 * @middle_chunks: the size of the middle buddy in chunks, 0 if free
 * @last_chunks: the size of the last buddy in chunks, 0 if free
 * @start_middle: index of the first chunk of the middle buddy
 * @first_num: the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; accordingly, there
 * will be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
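/*
 * Worked example of the above, assuming PAGE_SHIFT == 12 (4 KiB pages)
 * and a header that fits in one chunk:
 *
 *	CHUNK_SHIFT       == 12 - 6 == 6, so CHUNK_SIZE == 64 bytes
 *	ZHDR_SIZE_ALIGNED == 64, hence ZHDR_CHUNKS == 1
 *	TOTAL_CHUNKS      == 4096 >> 6 == 64
 *	NCHUNKS           == (4096 - 64) >> 6 == 63
 *
 * With CONFIG_DEBUG_SPINLOCK=y the header outgrows one chunk, giving
 * ZHDR_CHUNKS == 2 and NCHUNKS == 62.
 */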
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock: protects all pool fields and the first|last_chunks fields of any
 *	z3fold page in the pool
 * @unbuddied: array of lists tracking z3fold pages that contain at most two
 *	buddies; the list each z3fold page is added to depends on the size of
 *	its free region.
 * @lru: list tracking the z3fold pages in LRU order by most recently
 *	added buddy.
 * @pages_nr: number of z3fold pages in the pool.
 * @ops: pointer to a structure of user defined operations specified at
 *	pool creation time.
 * @zpool: zpool driver
 * @zpool_ops: zpool operations structure with an evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head lru;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
};
/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
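/*
 * E.g. with 64-byte chunks, size_to_chunks(100) == (100 + 63) >> 6 == 2:
 * requests are rounded up to a whole number of chunks.
 */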
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr;
	struct page *page;

	zhdr = container_of(ref, struct z3fold_header, refcount);
	page = virt_to_page(zhdr);

	if (!list_empty(&zhdr->buddy))
		list_del(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	free_z3fold_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}
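/*
 * Encoding example: for a header at 0x...f000 with first_num == 2,
 * FIRST (bud == 1) encodes to 0x...f003, since (1 + 2) & BUDDY_MASK == 3.
 * A HEADLESS handle is just the page address, with the low bits zero.
 */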
/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);

	return (handle - zhdr->first_num) & BUDDY_MASK;
}
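/*
 * Decoding example, continuing the one above: (0x...f003 - 2) & BUDDY_MASK
 * == 1 == FIRST. The 2-bit wraparound is also harmless: with first_num == 3,
 * LAST (bud == 3) encodes to low bits (3 + 3) & 3 == 2 and decodes back as
 * (2 - 3) & 3 == 3 == LAST.
 */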
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);

		nfree = max(nfree_before, nfree_after);
	} else {
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	}
	return nfree;
}
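/*
 * Worked example (TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1): for a page with
 * first_chunks == 10, start_middle == 30, middle_chunks == 5 and
 * last_chunks == 0, nfree_before == 0 (the first slot is taken) and
 * nfree_after == 64 - (30 + 5) == 29, so the page is filed on
 * unbuddied[29].
 */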
/*****************
 * API Functions
 *****************/

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp: gfp flags when allocating the z3fold pool structure
 * @ops: user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->lru);
	atomic64_set(&pool->pages_nr, 0);
	pool->ops = ops;
	return pool;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool: the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kfree(pool);
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;

	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
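/*
 * E.g. moving a 5-chunk middle object from start_middle == 30 to
 * dst_chunk == 11 copies 5 << 6 == 320 bytes from byte offset 1920 to
 * byte offset 704 within the page. memmove() rather than memcpy() is
 * used since the source and destination regions may overlap.
 */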
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;

		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
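/*
 * Compaction examples: if both outer slots are free, the middle object
 * becomes the new first buddy and first_num advances, so a stale MIDDLE
 * handle ((2 + n) & 3) now decodes as FIRST ((2 + n - (n + 1)) & 3 == 1).
 * Otherwise the object only slides towards an occupied neighbour when
 * that recovers at least BIG_CHUNK_GAP chunks: with first_chunks == 10,
 * ZHDR_CHUNKS == 1 and start_middle == 20, the gap is 20 - 11 == 9 >= 3,
 * so the object moves down to chunk 11.
 */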
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool: z3fold pool from which to allocate
 * @size: size in bytes of the desired allocation
 * @gfp: gfp flags used if the pool needs to grow
 * @handle: handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds what a z3fold page
 * can hold, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		chunks = size_to_chunks(size);

		/* First, try to find an unbuddied z3fold page. */
		zhdr = NULL;
		for_each_unbuddied_list(i, chunks) {
			spin_lock(&pool->lock);
			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
						struct z3fold_header, buddy);
			if (!zhdr) {
				spin_unlock(&pool->lock);
				continue;
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			z3fold_page_lock(zhdr);
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				z3fold_page_unlock(zhdr);
				spin_lock(&pool->lock);
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				spin_unlock(&pool->lock);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				continue;
			}
			goto found;
		}
		bud = FIRST;
	}

	/* Couldn't find unbuddied z3fold page, create new one */
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		spin_lock(&pool->lock);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	spin_lock(&pool->lock);
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

headless:
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
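/*
 * Illustrative use (a hypothetical in-kernel caller; real users go through
 * the zpool API at the bottom of this file). "src" here is an assumed
 * buffer of at least 100 bytes:
 *
 *	unsigned long handle;
 *
 *	if (z3fold_alloc(pool, 100, GFP_KERNEL, &handle) == 0) {
 *		void *obj = z3fold_map(pool, handle);
 *
 *		memcpy(obj, src, 100);
 *		z3fold_unmap(pool, handle);
 *	}
 */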
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool: pool in which the allocation resided
 * @handle: handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	int freechunks;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
	} else {
		if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
		    zhdr->last_chunks != 0) {
			z3fold_compact_page(zhdr);
			/* Add to the unbuddied list */
			spin_lock(&pool->lock);
			if (!list_empty(&zhdr->buddy))
				list_del(&zhdr->buddy);
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
			spin_unlock(&pool->lock);
		}
		z3fold_page_unlock(zhdr);
		spin_lock(&pool->lock);
		if (kref_put(&zhdr->refcount, release_z3fold_page))
			atomic64_dec(&pool->pages_nr);
		spin_unlock(&pool->lock);
	}
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool: pool from which a page will attempt to be evicted
 * @retries: number of pages on the LRU list for which eviction will
 *	be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0, freechunks;
	struct z3fold_header *zhdr;
	struct page *page;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		page = list_last_entry(&pool->lru, struct page, lru);
		list_del_init(&page->lru);

		zhdr = page_address(page);
		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			if (!list_empty(&zhdr->buddy))
				list_del_init(&zhdr->buddy);
			kref_get(&zhdr->refcount);
			spin_unlock(&pool->lock);
			z3fold_page_lock(zhdr);
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
			spin_unlock(&pool->lock);
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			if ((zhdr->first_chunks || zhdr->last_chunks ||
			     zhdr->middle_chunks) &&
			    !(zhdr->first_chunks && zhdr->last_chunks &&
			      zhdr->middle_chunks)) {
				z3fold_compact_page(zhdr);
				/* add to unbuddied list */
				spin_lock(&pool->lock);
				freechunks = num_free_chunks(zhdr);
				list_add(&zhdr->buddy,
					 &pool->unbuddied[freechunks]);
				spin_unlock(&pool->lock);
			}
			z3fold_page_unlock(zhdr);
			spin_lock(&pool->lock);
			if (kref_put(&zhdr->refcount, release_z3fold_page)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from the handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool: pool in which the allocation resides
 * @handle: handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}
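/*
 * Note that map/unmap must be strictly paired: while a MIDDLE buddy is
 * mapped, MIDDLE_CHUNK_MAPPED keeps z3fold_compact_page() from moving
 * the object underneath its user, and only z3fold_unmap() clears that
 * bit again.
 */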
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool: pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
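/*
 * With the driver registered, a z3fold pool is driven through the generic
 * zpool API. A minimal usage sketch (error handling elided; "my_evict_ops"
 * is a hypothetical zpool_ops with an evict callback):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *					     &my_evict_ops);
 *	unsigned long h;
 *
 *	zpool_malloc(zp, 100, GFP_KERNEL, &h);
 *	void *p = zpool_map_handle(zp, h, ZPOOL_MM_RW);
 *	...
 *	zpool_unmap_handle(zp, h);
 *	zpool_free(zp, h);
 *	zpool_destroy_pool(zp);
 */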
MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");