// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/dcache.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * accordingly be 63, or 62, respectively, freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
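/*
 * Worked example (a sketch, assuming PAGE_SIZE == 4096): CHUNK_SHIFT is
 * 12 - 6 == 6, so CHUNK_SIZE is 64 bytes and TOTAL_CHUNKS is 64. With a
 * z3fold header that fits in one chunk, ZHDR_SIZE_ALIGNED is 64 and
 * ZHDR_CHUNKS is 1, hence NCHUNKS == (4096 - 64) >> 6 == 63, matching
 * the 63 (or 62 with CONFIG_DEBUG_SPINLOCK=y) noted above.
 */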
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};
struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};
/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};
/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
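/*
 * Example (a sketch, assuming 64-byte chunks as above):
 * size_to_chunks(100) == (100 + 63) >> 6 == 2, i.e. allocation sizes
 * are rounded up to a whole number of chunks.
 */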
#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)
static void compact_page_work(struct work_struct *w);
static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
							    gfp);

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}
static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}
static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
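/*
 * A note on why the masking above works: z3fold_buddy_slots objects come
 * from a kmem cache created with SLOTS_ALIGN (0x40) alignment (see
 * z3fold_create_pool() below), and a non-headless handle is the address
 * of one slot inside such an object, so clearing the low six bits of the
 * handle recovers the object's base address.
 */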
static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}
static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
			int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "z3fold:", NULL, &ops, 0x33);
}
static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.mount		= z3fold_do_mount,
	.kill_sb	= kill_anon_super,
};
static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}
static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}
static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}
static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}
/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}
/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
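/*
 * Layout sketch of an encoded slot value: the page address of zhdr with
 * idx == (bud + first_num) & BUDDY_MASK added in the low bits, plus, for
 * a LAST buddy, the object size in chunks shifted left by BUDDY_SHIFT.
 * The handle given out is the address of the slot itself, which lives
 * outside the z3fold page, so it can stay stable across page migration
 * while the slot's contents are re-encoded.
 */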
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}
/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
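/*
 * Worked example (a sketch): with first_num == 3 and bud == MIDDLE (2),
 * __idx() stores (2 + 3) & BUDDY_MASK == 1 in the handle's low bits;
 * here (1 - 3) & BUDDY_MASK == 2 recovers MIDDLE, so the wrap-around
 * mentioned above is indeed harmless.
 */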
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return slots_to_pool(zhdr->slots);
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}
static void __attribute__((__unused__))
release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}
static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}
/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
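/*
 * Example (a sketch, assuming TOTAL_CHUNKS == 64 and ZHDR_CHUNKS == 1):
 * for a page holding only a middle buddy with start_middle == 20 and
 * middle_chunks == 10, nfree_before == 20 - 1 == 19 and
 * nfree_after == 64 - 30 == 34, so the page is filed under 34 free
 * chunks: only the larger contiguous region counts, since a new buddy
 * needs contiguous space.
 */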
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
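/*
 * Example (a sketch, assuming ZHDR_CHUNKS == 1): with first_chunks == 10
 * and start_middle == 20, the gap after the first buddy is
 * 20 - (10 + 1) == 9 >= BIG_CHUNK_GAP, so the middle buddy is moved down
 * to chunk 11, coalescing the free space into one region after it. A gap
 * of only 2 chunks would be left in place, since the memmove would cost
 * more than the fragmentation it removes.
 */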
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}
static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}
/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int cpu, i;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}
/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);
	z3fold_unregister_migration(pool);
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	kfree(pool);
}
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}
static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!trylock_page(page))
		return -EAGAIN;

	if (!z3fold_page_trylock(zhdr)) {
		unlock_page(page);
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		unlock_page(page);
		return -EBUSY;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	unlock_page(page);
	put_page(page);
	return 0;
}
static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}
static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}
static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}
static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}
static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}

static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};
MODULE_ALIAS("zpool-z3fold");
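/*
 * Usage sketch (illustrative only, not part of this file): with the
 * driver above registered, a zpool client such as zswap reaches z3fold
 * through the generic zpool API, e.g.:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *					     &client_zpool_ops);
 *	unsigned long handle;
 *
 *	if (zp && zpool_malloc(zp, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 *
 * where "client_zpool_ops", "src" and "len" are hypothetical client-side
 * names, not defined here.
 */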
static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}
static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}
module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");