// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */
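
/*
 * Illustrative sketch, not part of this file: since z3fold is only
 * reachable through the zpool API, a user such as zswap would do roughly
 * the following (error handling omitted; "ops" is an assumed
 * struct zpool_ops with an evict callback):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "pool", GFP_KERNEL, &ops);
 *	unsigned long handle;
 *
 *	zpool_malloc(zp, len, GFP_KERNEL, &handle);
 *	memcpy(zpool_map_handle(zp, handle, ZPOOL_MM_WO), buf, len);
 *	zpool_unmap_handle(zp, handle);
 */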

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 if CONFIG_DEBUG_SPINLOCK=y), which is
 * the maximum number of free chunks in a z3fold page; there are likewise
 * 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
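
/*
 * Worked example, assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12):
 * CHUNK_SHIFT = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes and TOTAL_CHUNKS = 64.
 * With sizeof(struct z3fold_header) <= 64, ZHDR_SIZE_ALIGNED = 64 and
 * ZHDR_CHUNKS = 1, leaving NCHUNKS = 63 chunks for objects.
 */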

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)
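
/*
 * The pool field of z3fold_buddy_slots doubles as a back link and a flag
 * word: the pool is allocated with kzalloc() and thus aligned well beyond
 * 4 bytes, so the low HANDLE_FLAG_MASK bits of the pointer are free to
 * carry flags; slots_to_pool() below masks them off again.
 */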

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the location of the middle buddy in chunks
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain
 *		fewer than three buddies; the list each z3fold page is added
 *		to depends on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user-defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
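
/*
 * E.g. with 64-byte chunks (NCHUNKS_ORDER == 6 on a 4K page),
 * size_to_chunks(100) == (100 + 63) >> 6 == 2.
 */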

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
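
/*
 * Resulting handle layout, in short: a non-HEADLESS handle is the address
 * of a slot entry, and the slot itself holds the z3fold page address with
 * the buddy index in the low BUDDY_MASK bits, plus, for LAST buddies, the
 * chunk count shifted left by BUDDY_SHIFT. A HEADLESS handle is just the
 * page address with the PAGE_HEADLESS bit set.
 */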

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
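
/*
 * Worked example: with first_chunks == 10, a 5-chunk middle object at
 * start_middle == 20, last_chunks == 0 and TOTAL_CHUNKS == 64, the gap
 * between the first and middle objects is not counted (nfree_before == 0
 * because a first buddy exists), while nfree_after == 64 - (20 + 5) == 39,
 * so the page ends up on the 39-chunk unbuddied list.
 */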

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
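
/*
 * Illustrative layout (in chunks, not to scale) for the first case above,
 * where only the middle buddy is in use:
 *
 *	before: | zhdr | free | middle | free |
 *	after:  | zhdr | first (was middle) | free |
 *
 * first_num is incremented so that the existing handle to the old middle
 * buddy now decodes as FIRST (see __idx() and handle_to_buddy()).
 */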

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_CLAIMED, &page->private) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not have been removed from its unbuddied
		 * list while the pool lock was held, and we have since taken
		 * the page lock, so kref_put could not have been called
		 * before we got here; it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0.  The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	struct z3fold_buddy_slots slots;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				page = NULL;
				continue;
			}

			if (unlikely(PageIsolated(page))) {
				clear_bit(PAGE_CLAIMED, &page->private);
				page = NULL;
				continue;
			}
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				clear_bit(PAGE_CLAIMED, &page->private);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = __encode_handle(zhdr, &slots, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked, so we need to lock the pool again */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
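
/*
 * Note the in-page placement the switch above implies: FIRST starts right
 * after the aligned header, MIDDLE at start_middle chunks, and LAST is
 * placed flush with the end of the page (its size is carried in the
 * handle, see handle_to_chunks()).
 */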

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private) ||
	    test_bit(PAGE_CLAIMED, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");
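
/*
 * The "zpool-z3fold" alias lets zpool autoload this module when a pool of
 * type "z3fold" is requested, e.g. via zswap's zpool= parameter.
 */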

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");