// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated as 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

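/*
 * A worked example (illustrative, assuming 4 KiB pages): with PAGE_SHIFT == 12
 * and NCHUNKS_ORDER == 6, CHUNK_SHIFT is 6, so CHUNK_SIZE is 64 bytes and
 * TOTAL_CHUNKS is 64. If struct z3fold_header rounds up to a single chunk,
 * ZHDR_CHUNKS is 1 and NCHUNKS is 63, i.e. 63 freelists per pool.
 */
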
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/**
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	the number of handles pointing into this page from
 *			other pages' slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_zalloc(pool->c_handle,
				  (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

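/*
 * Note that a non-headless handle is simply the address of one slot in a
 * struct z3fold_buddy_slots. The slots structures come from a kmem cache
 * created with SLOTS_ALIGN (0x40) alignment, so masking off the low bits
 * of the handle, as above, recovers the containing structure, and its
 * pool field links back to the owning pool.
 */
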
/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

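/*
 * Note on the loop above: the slot a handle points to can be rewritten by
 * migration or inter-page compaction, so the header address is only valid
 * while slots->lock is held for reading. The page lock is taken with
 * trylock and the read is retried on failure because writers update slots
 * while already holding the page lock, so blocking here could deadlock.
 */
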
static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

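/*
 * Encoding example (illustrative): for a LAST buddy of 10 chunks, the
 * value written into the slot is the page-aligned header address plus the
 * slot index in the two lowest bits, with (10 << BUDDY_SHIFT) OR-ed in so
 * that handle_to_chunks() can recover the size without reading the
 * header. The handle returned to the caller is the address of the slot
 * itself, not the encoded value.
 */
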
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;

	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else {
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	}
	return nfree;
}

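/*
 * For instance (illustrative numbers, assuming 4 KiB pages and a one-chunk
 * header): a page with first_chunks == 0, last_chunks == 0 and a middle
 * buddy of 10 chunks at start_middle == 20 has 20 - 1 = 19 free chunks
 * before the middle object and 64 - 30 = 34 after it; the larger region
 * (34) is reported, since a new buddy must fit in one contiguous region.
 */
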
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr) {
		if (kref_put(&new_zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			add_to_unbuddied(pool, new_zhdr);
			z3fold_page_unlock(new_zhdr);
		}
	}
	return NULL;
}

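/*
 * In short, compact_single_buddy() performs inter-page compaction: the
 * only remaining object of a sparsely used page is copied into a free
 * buddy of another page found via __z3fold_alloc(), and its slot is
 * rewritten in place to point at the new location. Since that slot lives
 * in the old page's slots structure, the moved object counts as a foreign
 * handle of the destination page (foreign_handles is incremented above),
 * which keeps that page from being migrated or reclaimed until the handle
 * is freed.
 */
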
#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
			atomic64_dec(&pool->pages_nr);
		else {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots)
		zhdr->slots = alloc_slots(pool,
					can_sleep ? GFP_NOIO : GFP_ATOMIC);
	return zhdr;
}

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool: the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */
	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
1077 int chunks
= size_to_chunks(size
);
1078 struct z3fold_header
*zhdr
= NULL
;
1079 struct page
*page
= NULL
;
1081 bool can_sleep
= gfpflags_allow_blocking(gfp
);
1086 if (size
> PAGE_SIZE
)
1089 if (size
> PAGE_SIZE
- ZHDR_SIZE_ALIGNED
- CHUNK_SIZE
)
1093 zhdr
= __z3fold_alloc(pool
, size
, can_sleep
);
1095 bud
= get_free_buddy(zhdr
, chunks
);
1096 if (bud
== HEADLESS
) {
1097 if (kref_put(&zhdr
->refcount
,
1098 release_z3fold_page_locked
))
1099 atomic64_dec(&pool
->pages_nr
);
1101 z3fold_page_unlock(zhdr
);
1102 pr_err("No free chunks in unbuddied\n");
1106 page
= virt_to_page(zhdr
);
1114 spin_lock(&pool
->stale_lock
);
1115 zhdr
= list_first_entry_or_null(&pool
->stale
,
1116 struct z3fold_header
, buddy
);
1118 * Before allocating a page, let's see if we can take one from
1119 * the stale pages list. cancel_work_sync() can sleep so we
1120 * limit this case to the contexts where we can sleep
1123 list_del(&zhdr
->buddy
);
1124 spin_unlock(&pool
->stale_lock
);
1125 cancel_work_sync(&zhdr
->work
);
1126 page
= virt_to_page(zhdr
);
1128 spin_unlock(&pool
->stale_lock
);
1132 page
= alloc_page(gfp
);
1137 zhdr
= init_z3fold_page(page
, bud
== HEADLESS
, pool
, gfp
);
1142 atomic64_inc(&pool
->pages_nr
);
1144 if (bud
== HEADLESS
) {
1145 set_bit(PAGE_HEADLESS
, &page
->private);
1150 __SetPageMovable(page
, pool
->inode
->i_mapping
);
1153 if (trylock_page(page
)) {
1154 __SetPageMovable(page
, pool
->inode
->i_mapping
);
1158 z3fold_page_lock(zhdr
);
1162 zhdr
->first_chunks
= chunks
;
1163 else if (bud
== LAST
)
1164 zhdr
->last_chunks
= chunks
;
1166 zhdr
->middle_chunks
= chunks
;
1167 zhdr
->start_middle
= zhdr
->first_chunks
+ ZHDR_CHUNKS
;
1169 add_to_unbuddied(pool
, zhdr
);
1172 spin_lock(&pool
->lock
);
1173 /* Add/move z3fold page to beginning of LRU */
1174 if (!list_empty(&page
->lru
))
1175 list_del(&page
->lru
);
1177 list_add(&page
->lru
, &pool
->lru
);
1179 *handle
= encode_handle(zhdr
, bud
);
1180 spin_unlock(&pool
->lock
);
1181 if (bud
!= HEADLESS
)
1182 z3fold_page_unlock(zhdr
);
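/*
 * Usage sketch (illustrative, not part of this file): a zpool client
 * such as zswap reaches the allocator through the zpool API, roughly:
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL, &ops);
 *	unsigned long handle;
 *
 *	if (zpool_malloc(zp, len, GFP_KERNEL, &handle) == 0) {
 *		void *dst = zpool_map_handle(zp, handle, ZPOOL_MM_WO);
 *
 *		memcpy(dst, src, len);
 *		zpool_unmap_handle(zp, handle);
 *	}
 *
 * which lands in z3fold_alloc()/z3fold_map()/z3fold_unmap() through
 * z3fold_zpool_driver below.
 */
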
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PG_reclaim flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
1197 static void z3fold_free(struct z3fold_pool
*pool
, unsigned long handle
)
1199 struct z3fold_header
*zhdr
;
1204 zhdr
= get_z3fold_header(handle
);
1205 page
= virt_to_page(zhdr
);
1206 page_claimed
= test_and_set_bit(PAGE_CLAIMED
, &page
->private);
1208 if (test_bit(PAGE_HEADLESS
, &page
->private)) {
1209 /* if a headless page is under reclaim, just leave.
1210 * NB: we use test_and_set_bit for a reason: if the bit
1211 * has not been set before, we release this page
1212 * immediately so we don't care about its value any more.
1214 if (!page_claimed
) {
1215 spin_lock(&pool
->lock
);
1216 list_del(&page
->lru
);
1217 spin_unlock(&pool
->lock
);
1218 put_z3fold_header(zhdr
);
1219 free_z3fold_page(page
, true);
1220 atomic64_dec(&pool
->pages_nr
);
1225 /* Non-headless case */
1226 bud
= handle_to_buddy(handle
);
1230 zhdr
->first_chunks
= 0;
1233 zhdr
->middle_chunks
= 0;
1236 zhdr
->last_chunks
= 0;
1239 pr_err("%s: unknown bud %d\n", __func__
, bud
);
1241 put_z3fold_header(zhdr
);
1246 free_handle(handle
, zhdr
);
1247 if (kref_put(&zhdr
->refcount
, release_z3fold_page_locked_list
)) {
1248 atomic64_dec(&pool
->pages_nr
);
1252 /* the page has not been claimed by us */
1253 z3fold_page_unlock(zhdr
);
1256 if (test_and_set_bit(NEEDS_COMPACTING
, &page
->private)) {
1257 put_z3fold_header(zhdr
);
1258 clear_bit(PAGE_CLAIMED
, &page
->private);
1261 if (zhdr
->cpu
< 0 || !cpu_online(zhdr
->cpu
)) {
1262 spin_lock(&pool
->lock
);
1263 list_del_init(&zhdr
->buddy
);
1264 spin_unlock(&pool
->lock
);
1266 kref_get(&zhdr
->refcount
);
1267 clear_bit(PAGE_CLAIMED
, &page
->private);
1268 do_compact_page(zhdr
, true);
1271 kref_get(&zhdr
->refcount
);
1272 clear_bit(PAGE_CLAIMED
, &page
->private);
1273 queue_work_on(zhdr
->cpu
, pool
->compact_wq
, &zhdr
->work
);
1274 put_z3fold_header(zhdr
);
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the setting of the PG_reclaim flag on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page))
					atomic64_dec(&pool->pages_nr);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/* test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (kref_put(&zhdr->refcount,
						release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

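/*
 * Migration note: the memcpy() above copies the whole page including the
 * header, so the new page keeps pointing at the old page's slots
 * structure and the handles held by users (slot addresses) stay valid.
 * Only the slot contents, which encode the old page's address, go stale,
 * and the encode_handle() calls above rewrite them to point into the new
 * page.
 */
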
static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}

static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

, unsigned long handle
,
1762 enum zpool_mapmode mm
)
1764 return z3fold_map(pool
, handle
);
1766 static void z3fold_zpool_unmap(void *pool
, unsigned long handle
)
1768 z3fold_unmap(pool
, handle
);
1771 static u64
z3fold_zpool_total_size(void *pool
)
1773 return z3fold_get_pool_size(pool
) * PAGE_SIZE
;
static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

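/*
 * The "zpool-z3fold" alias lets the zpool core load this module on demand
 * (via request_module()) when a pool of the "z3fold" type is requested,
 * e.g. through zswap's zpool= parameter.
 */
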
static int __init init_z3fold(void)
{
	int ret;

	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is space remaining for at least one buddy.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");