// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher-density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/zpool.h>
#include <linux/magic.h>
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), the
 * maximum number of free chunks in a z3fold page; there will likewise be
 * 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
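/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): CHUNK_SHIFT is
 * 12 - 6 == 6, so chunks are 64 bytes and TOTAL_CHUNKS == 64. The header is
 * rounded up to a whole number of chunks (ZHDR_CHUNKS), and NCHUNKS is what
 * remains for payload.
 */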
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 * @destroying: bool to stop migration once we start destruction
 * @isolated:	int to count the number of pages currently in isolation
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct wait_queue_head isolate_wait;
	struct work_struct work;
	struct inode *inode;
	bool destroying;
	int isolated;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
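/*
 * For illustration (not part of the original source): with CHUNK_SIZE == 64,
 * size_to_chunks(1) == 1, size_to_chunks(64) == 1 and size_to_chunks(65) == 2,
 * i.e. sizes are rounded up to a whole number of chunks.
 */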
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
static void compact_page_work(struct work_struct *w);
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}
static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}
static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
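/*
 * Note (illustrative, not from the original source): this works because the
 * slots structures come from a kmem cache created with SLOTS_ALIGN (64-byte)
 * alignment, so a non-headless handle, which points at one of the slot[]
 * entries, has the base of its containing z3fold_buddy_slots in the bits
 * above the low six.
 */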
static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}
static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}
static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};
static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}
static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}
static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}
static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
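/*
 * Illustrative note on the encoding (not from the original source): for a
 * non-headless buddy the slot value is the page address plus the buddy index
 * in bits [1:0]; for a LAST buddy, last_chunks is additionally stored in the
 * in-page bits above BUDDY_SHIFT so the object size survives page
 * relocation. The handle given out is the address of the slot, not the
 * value itself, which is what lets migration re-encode slots in place.
 */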
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
419 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
420 * but that doesn't matter. because the masking will result in the
421 * correct buddy number.
423 static enum buddy
handle_to_buddy(unsigned long handle
)
425 struct z3fold_header
*zhdr
;
428 WARN_ON(handle
& (1 << PAGE_HEADLESS
));
429 addr
= *(unsigned long *)handle
;
430 zhdr
= (struct z3fold_header
*)(addr
& PAGE_MASK
);
431 return (addr
- zhdr
->first_num
) & BUDDY_MASK
;
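/*
 * Worked example (illustrative): with first_num == 2, encode_handle() maps
 * bud == LAST (3) to idx == (3 + 2) & BUDDY_MASK == 1; decoding then yields
 * (1 - 2) & BUDDY_MASK == 3 == LAST again. The arithmetic wraps modulo
 * BUDDY_MASK + 1, which is why an idx smaller than first_num is harmless.
 */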
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}
static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}
static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
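/*
 * Worked example (illustrative): with TOTAL_CHUNKS == 64, first_chunks == 4,
 * a 5-chunk middle buddy at start_middle == 20 and last_chunks == 0,
 * nfree_before is 0 (the first buddy is in use) and nfree_after is
 * 64 - (20 + 5) == 39, so num_free_chunks() returns 39.
 */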
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
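/*
 * E.g. (illustrative) mchunk_memmove(zhdr, ZHDR_CHUNKS) slides the middle
 * buddy flush against the header; z3fold_compact_page() below uses exactly
 * this to turn a lone middle buddy into a first buddy.
 */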
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
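/*
 * Worked example of the BIG_CHUNK_GAP test (illustrative): with
 * first_chunks == 4, ZHDR_CHUNKS == 1 and start_middle == 10, the gap is
 * 10 - (4 + 1) == 5 >= BIG_CHUNK_GAP, so the middle buddy is moved down to
 * chunk 5 and start_middle becomes 5; a gap of only 2 chunks would be left
 * in place.
 */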
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}
static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	init_waitqueue_head(&pool->isolate_wait);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
static bool pool_isolated_are_drained(struct z3fold_pool *pool)
{
	bool ret;

	spin_lock(&pool->lock);
	ret = pool->isolated == 0;
	spin_unlock(&pool->lock);
	return ret;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	/*
	 * We set pool->destroying under lock to ensure that
	 * z3fold_page_isolate() sees any changes to destroying. This way we
	 * avoid the need for any memory barriers.
	 */

	spin_lock(&pool->lock);
	pool->destroying = true;
	spin_unlock(&pool->lock);

	/*
	 * We need to ensure that no pages are being migrated while we destroy
	 * these workqueues, as migration can queue work on either of the
	 * workqueues.
	 */
	wait_event(pool->isolate_wait, !pool_isolated_are_drained(pool));

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need encode the handles before unlocking, since
			 * we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
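/*
 * Sketch of a conforming eviction handler (illustrative only; my_evict and
 * my_writeback are hypothetical names, not part of this file):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (my_writeback(pool, handle))
 *			return -EAGAIN;	// z3fold_reclaim_page() will retry
 *		z3fold_free(pool, handle);	// success: handler frees the handle
 *		return 0;
 *	}
 *
 * z3fold_zpool_evict() below follows the same contract by forwarding to the
 * zpool user's evict callback.
 */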
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
/*
 * z3fold_dec_isolated() expects to be called while pool->lock is held.
 */
static void z3fold_dec_isolated(struct z3fold_pool *pool)
{
	assert_spin_locked(&pool->lock);
	VM_BUG_ON(pool->isolated <= 0);
	pool->isolated--;

	/*
	 * If we have no more isolated pages, we have to see if
	 * z3fold_destroy_pool() is waiting for a signal.
	 */
	if (pool->isolated == 0 && waitqueue_active(&pool->isolate_wait))
		wake_up_all(&pool->isolate_wait);
}
static void z3fold_inc_isolated(struct z3fold_pool *pool)
{
	pool->isolated++;
}
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		/*
		 * We need to check for destruction while holding pool->lock,
		 * as otherwise destruction could see 0 isolated pages, and
		 * proceed to destroy the pool.
		 */
		if (unlikely(pool->destroying)) {
			spin_unlock(&pool->lock);
			/*
			 * If this page isn't stale, somebody else holds a
			 * reference to it. Let's drop our refcount so that
			 * they can call the release logic.
			 */
			if (unlikely(kref_put(&zhdr->refcount,
					      release_z3fold_page_locked))) {
				/*
				 * If we get here we have kref problems, so we
				 * should freak out.
				 */
				WARN(1, "Z3fold is experiencing kref problems\n");
				z3fold_page_unlock(zhdr);
				return false;
			}
			z3fold_page_unlock(zhdr);
			return false;
		}

		z3fold_inc_isolated(pool);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}
static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr)) {
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	spin_lock(&pool->lock);
	z3fold_dec_isolated(pool);
	spin_unlock(&pool->lock);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}
static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		spin_lock(&pool->lock);
		z3fold_dec_isolated(pool);
		spin_unlock(&pool->lock);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	z3fold_dec_isolated(pool);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}
static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}
static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}
static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}
static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
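/*
 * Usage sketch via the zpool API (illustrative; "demo", ops, src and len are
 * hypothetical, and error handling is omitted):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "demo", GFP_KERNEL, &ops);
 *	unsigned long handle;
 *
 *	if (!zpool_malloc(zp, len, GFP_KERNEL, &handle)) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 */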
static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}
static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}
module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");