/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
/**
 * struct z3fold_header - z3fold page metadata occupying the first chunk of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 with CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
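
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and a
 * z3fold_header that fits in a single chunk):
 *
 *	CHUNK_SHIFT       = 12 - 6 = 6
 *	CHUNK_SIZE        = 1 << 6 = 64 bytes
 *	ZHDR_SIZE_ALIGNED = round_up(sizeof(struct z3fold_header), 64) = 64
 *	ZHDR_CHUNKS       = 64 >> 6 = 1
 *	TOTAL_CHUNKS      = 4096 >> 6 = 64
 *	NCHUNKS           = (4096 - 64) >> 6 = 63
 *
 * With CONFIG_DEBUG_SPINLOCK=y the header may grow past one chunk, giving
 * the 62-chunk case mentioned above.
 */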
#define BUDDY_MASK	(0x3)
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @lock:	protects all pool fields and first|last_chunk fields of any
 *		z3fold page in the pool
 * @unbuddied:	array of lists tracking z3fold pages that contain 2- buddies;
 *		the lists each z3fold page is added to depends on the size of
 *		its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	spinlock_t lock;
	struct list_head unbuddied[NCHUNKS];
	struct list_head lru;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
};

/*****************
 * Helpers
 *****************/
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
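
/*
 * Example (illustrative, 4K pages, so CHUNK_SIZE == 64): a 100-byte
 * allocation needs (100 + 63) >> 6 == 2 chunks (128 bytes), while a
 * 64-byte allocation fits exactly in (64 + 63) >> 6 == 1 chunk.
 */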
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	INIT_LIST_HEAD(&zhdr->buddy);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}
static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr;
	struct page *page;

	zhdr = container_of(ref, struct z3fold_header, refcount);
	page = virt_to_page(zhdr);

	if (!list_empty(&zhdr->buddy))
		list_del(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	free_z3fold_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}
/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
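
/*
 * Example (illustrative): a z3fold page is page-aligned, so the low
 * PAGE_SHIFT bits of the header address are zero and can carry the buddy
 * number. With first_num == 1, encoding MIDDLE (buddy number 2) yields
 *
 *	handle = (unsigned long)zhdr + ((2 + 1) & BUDDY_MASK)
 *
 * i.e. zhdr | 3. Decoding then recovers (3 - 1) & BUDDY_MASK == MIDDLE,
 * and handle & PAGE_MASK recovers zhdr itself.
 */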
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
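
/*
 * Example (illustrative, NCHUNKS == 63, ZHDR_CHUNKS == 1): a page with
 * first_chunks == 10, middle_chunks == 0 and last_chunks == 20 has
 * 63 - 10 - 20 == 33 free chunks and thus sits on unbuddied[33]. If
 * instead a 5-chunk middle object sits at start_middle == 30 with the
 * first buddy free and the last buddy in use, the larger gap is the one
 * before the middle object: 30 - 1 == 29 chunks.
 */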
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool;
	int i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	for_each_unbuddied_list(i, 0)
		INIT_LIST_HEAD(&pool->unbuddied[i]);
	INIT_LIST_HEAD(&pool->lru);
	atomic64_set(&pool->pages_nr, 0);
	pool->ops = ops;
	return pool;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kfree(pool);
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		/* new_start: right after 1st chunk */
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
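
/*
 * Compaction example (illustrative): with first_chunks == 5,
 * last_chunks == 0 and an 8-chunk middle object at start_middle == 20,
 * the gap below the middle object is 20 - (5 + 1) == 14 chunks, well
 * above BIG_CHUNK_GAP, so the middle object is moved down to chunk 6
 * and all remaining free space becomes one contiguous tail region.
 */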
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	enum buddy bud;
	struct page *page;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		chunks = size_to_chunks(size);

		/* First, try to find an unbuddied z3fold page. */
		for_each_unbuddied_list(i, chunks) {
			spin_lock(&pool->lock);
			zhdr = list_first_entry_or_null(&pool->unbuddied[i],
						struct z3fold_header, buddy);
			if (!zhdr) {
				spin_unlock(&pool->lock);
				continue;
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			z3fold_page_lock(zhdr);
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				z3fold_page_unlock(zhdr);
				spin_lock(&pool->lock);
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				spin_unlock(&pool->lock);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				continue;
			}
			goto found;
		}
		bud = FIRST;
	}

	/* Couldn't find unbuddied z3fold page, create new one */
	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		spin_lock(&pool->lock);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	spin_lock(&pool->lock);
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
	}

headless:
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
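
/*
 * Usage sketch (illustrative only; in practice these static functions are
 * reached through the zpool wrappers below, and 'data' is hypothetical):
 *
 *	unsigned long handle;
 *	struct z3fold_pool *pool = z3fold_create_pool(GFP_KERNEL, NULL);
 *
 *	if (pool && z3fold_alloc(pool, 2048, GFP_KERNEL, &handle) == 0) {
 *		void *obj = z3fold_map(pool, handle);
 *
 *		memcpy(obj, data, 2048);
 *		z3fold_unmap(pool, handle);
 *		z3fold_free(pool, handle);
 *	}
 */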
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	int freechunks;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
	} else {
		if (zhdr->first_chunks != 0 || zhdr->middle_chunks != 0 ||
		    zhdr->last_chunks != 0) {
			z3fold_compact_page(zhdr);
			/* Add to the unbuddied list */
			spin_lock(&pool->lock);
			if (!list_empty(&zhdr->buddy))
				list_del(&zhdr->buddy);
			freechunks = num_free_chunks(zhdr);
			list_add(&zhdr->buddy, &pool->unbuddied[freechunks]);
			spin_unlock(&pool->lock);
		}
		z3fold_page_unlock(zhdr);
		spin_lock(&pool->lock);
		if (kref_put(&zhdr->refcount, release_z3fold_page))
			atomic64_dec(&pool->pages_nr);
		spin_unlock(&pool->lock);
	}
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0, freechunks;
	struct z3fold_header *zhdr;
	struct page *page;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		page = list_last_entry(&pool->lru, struct page, lru);
		list_del_init(&page->lru);

		zhdr = page_address(page);
		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			if (!list_empty(&zhdr->buddy))
				list_del_init(&zhdr->buddy);
			kref_get(&zhdr->refcount);
			spin_unlock(&pool->lock);
			z3fold_page_lock(zhdr);
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
			spin_unlock(&pool->lock);
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page);
				return 0;
			}
			spin_lock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			if ((zhdr->first_chunks || zhdr->last_chunks ||
			     zhdr->middle_chunks) &&
			    !(zhdr->first_chunks && zhdr->last_chunks &&
			      zhdr->middle_chunks)) {
				z3fold_compact_page(zhdr);
				/* add to unbuddied list */
				spin_lock(&pool->lock);
				freechunks = num_free_chunks(zhdr);
				list_add(&zhdr->buddy,
					 &pool->unbuddied[freechunks]);
				spin_unlock(&pool->lock);
			}
			z3fold_page_unlock(zhdr);
			spin_lock(&pool->lock);
			if (kref_put(&zhdr->refcount, release_z3fold_page)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
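
/*
 * Sketch of an eviction handler following the protocol described above
 * (illustrative only; 'my_writeback' is a hypothetical helper that moves
 * the object's contents elsewhere):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = my_writeback(obj);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;	// page goes back on its lists
 *		z3fold_free(pool, handle);	// mandatory on success
 *		return 0;
 *	}
 */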
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}
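
/*
 * Example (illustrative, 4K pages): within a z3fold page the buddies map
 * to fixed offsets. FIRST always starts right after the header, at byte
 * ZHDR_SIZE_ALIGNED (64); MIDDLE starts at start_middle * 64; LAST ends
 * flush with the end of the page, i.e. starts at 4096 - last_chunks * 64.
 */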
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(gfp, zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
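
/*
 * With the driver registered, a zpool user selects z3fold by name, e.g.
 * (illustrative; 'my_zpool_ops' is hypothetical):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *					     &my_zpool_ops);
 *
 * zswap does this when booted with zswap.zpool=z3fold.
 */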
static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");