/*
 * Copyright (c) 2010,2011, Dan Magenheimer, Oracle Corp.
 * Copyright (c) 2010,2011, Nitin Gupta
 *
 * Zcache provides an in-kernel "host implementation" for transcendent memory
 * and, thus indirectly, for cleancache and frontswap.  Zcache includes two
 * page-accessible memory [1] interfaces, both utilizing lzo1x compression:
 * 1) "compression buddies" ("zbud") is used for ephemeral pages
 * 2) xvmalloc is used for persistent pages.
 * Xvmalloc (based on the TLSF allocator) has very low fragmentation
 * so maximizes space efficiency, while zbud allows pairs (and potentially,
 * in the future, more than a pair of) compressed pages to be closely linked
 * so that reclaiming can be done via the kernel's physical-page-oriented
 * "shrinker" interface.
 *
 * [1] For a definition of page-accessible memory (aka PAM), see:
 *     http://marc.info/?l=linux-mm&m=127811271605009
 */
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/lzo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include "tmem.h"

#include "../zram/xvmalloc.h" /* if built in drivers/staging */
#if (!defined(CONFIG_CLEANCACHE) && !defined(CONFIG_FRONTSWAP))
#error "zcache is useless without CONFIG_CLEANCACHE or CONFIG_FRONTSWAP"
#endif
#ifdef CONFIG_CLEANCACHE
#include <linux/cleancache.h>
#endif
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif
#if 0
/* this is more aggressive but may cause other problems? */
#define ZCACHE_GFP_MASK	(GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN)
#else
#define ZCACHE_GFP_MASK \
	(__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
#endif
/*
 * Compression buddies ("zbud") provides for packing two (or, possibly
 * in the future, more) compressed ephemeral pages into a single "raw"
 * (physical) page and tracking them with data structures so that
 * the raw pages can be easily reclaimed.
 *
 * A zbud page ("zbpg") is an aligned page containing a list_head,
 * a lock, and two "zbud headers".  The remainder of the physical
 * page is divided up into aligned 64-byte "chunks" which contain
 * the compressed data for zero, one, or two zbuds.  Each zbpg
 * resides on: (1) an "unused list" if it has no zbuds; (2) a
 * "buddied" list if it is fully populated with two zbuds; or
 * (3) one of PAGE_SIZE/64 "unbuddied" lists indexed by how many chunks
 * the one unbuddied zbud uses.  The data inside a zbpg cannot be
 * read or written unless the zbpg's lock is held.
 */
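/*
 * Illustrative layout sketch (not part of the original source), inferred
 * from the description above and from zbud_data() below: buddy 0's data
 * starts at the first chunk boundary past the zbud_page header and grows
 * upward, while buddy 1's data is packed against the end of the page, so
 * the free chunks sit in the middle.
 *
 *	+------------------+------------------+  ...  +------------------+
 *	| struct zbud_page | buddy[0] data -> |       | <- buddy[1] data |
 *	+------------------+------------------+  ...  +------------------+
 *	0            (chunk-aligned)                               PAGE_SIZE
 */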
#define ZBH_SENTINEL	0x43214321
#define ZBPG_SENTINEL	0xdeadbeef

#define ZBUD_MAX_BUDS	2
struct zbud_hdr {
	uint32_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	uint16_t size; /* compressed size in bytes, zero means unused */
	DECL_SENTINEL
};

struct zbud_page {
	struct list_head bud_list;
	spinlock_t lock;
	struct zbud_hdr buddy[ZBUD_MAX_BUDS];
	DECL_SENTINEL
	/* followed by NUM_CHUNK aligned CHUNK_SIZE-byte chunks */
};
#define CHUNK_SHIFT	6
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define CHUNK_MASK	(~(CHUNK_SIZE-1))
#define NCHUNKS		(((PAGE_SIZE - sizeof(struct zbud_page)) & \
				CHUNK_MASK) >> CHUNK_SHIFT)
#define MAX_CHUNK	(NCHUNKS-1)
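/*
 * Worked example (illustrative only, assuming a 4KB PAGE_SIZE): CHUNK_SIZE
 * is 1 << 6 = 64 bytes, matching the 64-byte chunks described above.  After
 * subtracting the zbud_page header and rounding down to a chunk boundary,
 * NCHUNKS comes out to roughly 62, so MAX_CHUNK is about 61 and the largest
 * compressed buddy that fits is MAX_CHUNK << CHUNK_SHIFT, just under a full
 * page (see zbud_max_buddy_size() below).
 */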
static struct {
	struct list_head list;
	unsigned count;
} zbud_unbuddied[NCHUNKS];
/* list N contains pages with N chunks USED and NCHUNKS-N unused */
/* element 0 is never used but optimizing that isn't worth it */
static unsigned long zbud_cumul_chunk_counts[NCHUNKS];
struct list_head zbud_buddied_list;
static unsigned long zcache_zbud_buddied_count;

/* protects the buddied list and all unbuddied lists */
static DEFINE_SPINLOCK(zbud_budlists_spinlock);

static LIST_HEAD(zbpg_unused_list);
static unsigned long zcache_zbpg_unused_list_count;

/* protects the unused page list */
static DEFINE_SPINLOCK(zbpg_unused_list_spinlock);

static atomic_t zcache_zbud_curr_raw_pages;
static atomic_t zcache_zbud_curr_zpages;
static unsigned long zcache_zbud_curr_zbytes;
static unsigned long zcache_zbud_cumul_zpages;
static unsigned long zcache_zbud_cumul_zbytes;
static unsigned long zcache_compress_poor;
/* forward references */
static void *zcache_get_free_page(void);
static void zcache_free_page(void *p);
/*
 * zbud helper functions
 */

static inline unsigned zbud_max_buddy_size(void)
{
	return MAX_CHUNK << CHUNK_SHIFT;
}

static inline unsigned zbud_size_to_chunks(unsigned size)
{
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
static inline int zbud_budnum(struct zbud_hdr *zh)
{
	unsigned offset = (unsigned long)zh & (PAGE_SIZE - 1);
	struct zbud_page *zbpg = NULL;
	unsigned budnum = -1U;
	int i;

	for (i = 0; i < ZBUD_MAX_BUDS; i++)
		if (offset == offsetof(typeof(*zbpg), buddy[i])) {
			budnum = i;
			break;
		}
	BUG_ON(budnum == -1U);
	return budnum;
}
static char *zbud_data(struct zbud_hdr *zh, unsigned size)
{
	struct zbud_page *zbpg;
	char *p;
	unsigned budnum;

	ASSERT_SENTINEL(zh, ZBH);
	budnum = zbud_budnum(zh);
	BUG_ON(size == 0 || size > zbud_max_buddy_size());
	zbpg = container_of(zh, struct zbud_page, buddy[budnum]);
	ASSERT_SPINLOCK(&zbpg->lock);
	p = (char *)zbpg;
	if (budnum == 0)
		p += ((sizeof(struct zbud_page) + CHUNK_SIZE - 1) &
							CHUNK_MASK);
	else if (budnum == 1)
		p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
	return p;
}
/*
 * zbud raw page management
 */

static struct zbud_page *zbud_alloc_raw_page(void)
{
	struct zbud_page *zbpg = NULL;
	struct zbud_hdr *zh0, *zh1;
	bool recycled = false;	/* set when the page came off the unused list */

	/* if any pages on the zbpg list, use one */
	spin_lock(&zbpg_unused_list_spinlock);
	if (!list_empty(&zbpg_unused_list)) {
		zbpg = list_first_entry(&zbpg_unused_list,
				struct zbud_page, bud_list);
		list_del_init(&zbpg->bud_list);
		zcache_zbpg_unused_list_count--;
		recycled = true;
	}
	spin_unlock(&zbpg_unused_list_spinlock);
	if (zbpg == NULL)
		/* none on zbpg list, try to get a kernel page */
		zbpg = zcache_get_free_page();
	if (likely(zbpg != NULL)) {
		INIT_LIST_HEAD(&zbpg->bud_list);
		zh0 = &zbpg->buddy[0]; zh1 = &zbpg->buddy[1];
		spin_lock_init(&zbpg->lock);
		if (recycled) {
			ASSERT_INVERTED_SENTINEL(zbpg, ZBPG);
			SET_SENTINEL(zbpg, ZBPG);
			BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
			BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
		} else {
			atomic_inc(&zcache_zbud_curr_raw_pages);
			INIT_LIST_HEAD(&zbpg->bud_list);
			SET_SENTINEL(zbpg, ZBPG);
			zh0->size = 0; zh1->size = 0;
			tmem_oid_set_invalid(&zh0->oid);
			tmem_oid_set_invalid(&zh1->oid);
		}
	}
	return zbpg;
}
static void zbud_free_raw_page(struct zbud_page *zbpg)
{
	struct zbud_hdr *zh0 = &zbpg->buddy[0], *zh1 = &zbpg->buddy[1];

	ASSERT_SENTINEL(zbpg, ZBPG);
	BUG_ON(!list_empty(&zbpg->bud_list));
	ASSERT_SPINLOCK(&zbpg->lock);
	BUG_ON(zh0->size != 0 || tmem_oid_valid(&zh0->oid));
	BUG_ON(zh1->size != 0 || tmem_oid_valid(&zh1->oid));
	INVERT_SENTINEL(zbpg, ZBPG);
	spin_unlock(&zbpg->lock);
	spin_lock(&zbpg_unused_list_spinlock);
	list_add(&zbpg->bud_list, &zbpg_unused_list);
	zcache_zbpg_unused_list_count++;
	spin_unlock(&zbpg_unused_list_spinlock);
}
/*
 * core zbud handling routines
 */

static unsigned zbud_free(struct zbud_hdr *zh)
{
	unsigned size = zh->size;

	ASSERT_SENTINEL(zh, ZBH);
	BUG_ON(!tmem_oid_valid(&zh->oid));
	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
	zh->size = 0;
	tmem_oid_set_invalid(&zh->oid);
	INVERT_SENTINEL(zh, ZBH);
	zcache_zbud_curr_zbytes -= size;
	atomic_dec(&zcache_zbud_curr_zpages);
	return size;
}
static void zbud_free_and_delist(struct zbud_hdr *zh)
{
	unsigned chunks;
	struct zbud_hdr *zh_other;
	unsigned budnum = zbud_budnum(zh), size;
	struct zbud_page *zbpg =
		container_of(zh, struct zbud_page, buddy[budnum]);

	spin_lock(&zbpg->lock);
	if (list_empty(&zbpg->bud_list)) {
		/* ignore zombie page... see zbud_evict_pages() */
		spin_unlock(&zbpg->lock);
		return;
	}
	size = zbud_free(zh);
	ASSERT_SPINLOCK(&zbpg->lock);
	zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0];
	if (zh_other->size == 0) { /* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(size);
		spin_lock(&zbud_budlists_spinlock);
		BUG_ON(list_empty(&zbud_unbuddied[chunks].list));
		list_del_init(&zbpg->bud_list);
		zbud_unbuddied[chunks].count--;
		spin_unlock(&zbud_budlists_spinlock);
		zbud_free_raw_page(zbpg);
	} else { /* was buddied: move remaining buddy to unbuddied list */
		chunks = zbud_size_to_chunks(zh_other->size);
		spin_lock(&zbud_budlists_spinlock);
		list_del_init(&zbpg->bud_list);
		zcache_zbud_buddied_count--;
		list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list);
		zbud_unbuddied[chunks].count++;
		spin_unlock(&zbud_budlists_spinlock);
		spin_unlock(&zbpg->lock);
	}
}
297 static struct zbud_hdr
*zbud_create(uint32_t pool_id
, struct tmem_oid
*oid
,
298 uint32_t index
, struct page
*page
,
299 void *cdata
, unsigned size
)
301 struct zbud_hdr
*zh0
, *zh1
, *zh
= NULL
;
302 struct zbud_page
*zbpg
= NULL
, *ztmp
;
305 int i
, found_good_buddy
= 0;
307 nchunks
= zbud_size_to_chunks(size
) ;
308 for (i
= MAX_CHUNK
- nchunks
+ 1; i
> 0; i
--) {
309 spin_lock(&zbud_budlists_spinlock
);
310 if (!list_empty(&zbud_unbuddied
[i
].list
)) {
311 list_for_each_entry_safe(zbpg
, ztmp
,
312 &zbud_unbuddied
[i
].list
, bud_list
) {
313 if (spin_trylock(&zbpg
->lock
)) {
314 found_good_buddy
= i
;
315 goto found_unbuddied
;
319 spin_unlock(&zbud_budlists_spinlock
);
321 /* didn't find a good buddy, try allocating a new page */
322 zbpg
= zbud_alloc_raw_page();
323 if (unlikely(zbpg
== NULL
))
325 /* ok, have a page, now compress the data before taking locks */
326 spin_lock(&zbpg
->lock
);
327 spin_lock(&zbud_budlists_spinlock
);
328 list_add_tail(&zbpg
->bud_list
, &zbud_unbuddied
[nchunks
].list
);
329 zbud_unbuddied
[nchunks
].count
++;
330 zh
= &zbpg
->buddy
[0];
334 ASSERT_SPINLOCK(&zbpg
->lock
);
335 zh0
= &zbpg
->buddy
[0]; zh1
= &zbpg
->buddy
[1];
336 BUG_ON(!((zh0
->size
== 0) ^ (zh1
->size
== 0)));
337 if (zh0
->size
!= 0) { /* buddy0 in use, buddy1 is vacant */
338 ASSERT_SENTINEL(zh0
, ZBH
);
340 } else if (zh1
->size
!= 0) { /* buddy1 in use, buddy0 is vacant */
341 ASSERT_SENTINEL(zh1
, ZBH
);
345 list_del_init(&zbpg
->bud_list
);
346 zbud_unbuddied
[found_good_buddy
].count
--;
347 list_add_tail(&zbpg
->bud_list
, &zbud_buddied_list
);
348 zcache_zbud_buddied_count
++;
351 SET_SENTINEL(zh
, ZBH
);
355 zh
->pool_id
= pool_id
;
356 /* can wait to copy the data until the list locks are dropped */
357 spin_unlock(&zbud_budlists_spinlock
);
359 to
= zbud_data(zh
, size
);
360 memcpy(to
, cdata
, size
);
361 spin_unlock(&zbpg
->lock
);
362 zbud_cumul_chunk_counts
[nchunks
]++;
363 atomic_inc(&zcache_zbud_curr_zpages
);
364 zcache_zbud_cumul_zpages
++;
365 zcache_zbud_curr_zbytes
+= size
;
366 zcache_zbud_cumul_zbytes
+= size
;
371 static int zbud_decompress(struct page
*page
, struct zbud_hdr
*zh
)
373 struct zbud_page
*zbpg
;
374 unsigned budnum
= zbud_budnum(zh
);
375 size_t out_len
= PAGE_SIZE
;
376 char *to_va
, *from_va
;
380 zbpg
= container_of(zh
, struct zbud_page
, buddy
[budnum
]);
381 spin_lock(&zbpg
->lock
);
382 if (list_empty(&zbpg
->bud_list
)) {
383 /* ignore zombie page... see zbud_evict_pages() */
387 ASSERT_SENTINEL(zh
, ZBH
);
388 BUG_ON(zh
->size
== 0 || zh
->size
> zbud_max_buddy_size());
389 to_va
= kmap_atomic(page
, KM_USER0
);
391 from_va
= zbud_data(zh
, size
);
392 ret
= lzo1x_decompress_safe(from_va
, size
, to_va
, &out_len
);
393 BUG_ON(ret
!= LZO_E_OK
);
394 BUG_ON(out_len
!= PAGE_SIZE
);
395 kunmap_atomic(to_va
, KM_USER0
);
397 spin_unlock(&zbpg
->lock
);
/*
 * The following routines handle shrinking of ephemeral pages by evicting
 * pages "least valuable" first.
 */

static unsigned long zcache_evicted_raw_pages;
static unsigned long zcache_evicted_buddied_pages;
static unsigned long zcache_evicted_unbuddied_pages;

static struct tmem_pool *zcache_get_pool_by_id(uint32_t poolid);
static void zcache_put_pool(struct tmem_pool *pool);
414 * Flush and free all zbuds in a zbpg, then free the pageframe
416 static void zbud_evict_zbpg(struct zbud_page
*zbpg
)
420 uint32_t pool_id
[ZBUD_MAX_BUDS
], index
[ZBUD_MAX_BUDS
];
421 struct tmem_oid oid
[ZBUD_MAX_BUDS
];
422 struct tmem_pool
*pool
;
424 ASSERT_SPINLOCK(&zbpg
->lock
);
425 BUG_ON(!list_empty(&zbpg
->bud_list
));
426 for (i
= 0, j
= 0; i
< ZBUD_MAX_BUDS
; i
++) {
427 zh
= &zbpg
->buddy
[i
];
429 pool_id
[j
] = zh
->pool_id
;
431 index
[j
] = zh
->index
;
436 spin_unlock(&zbpg
->lock
);
437 for (i
= 0; i
< j
; i
++) {
438 pool
= zcache_get_pool_by_id(pool_id
[i
]);
440 tmem_flush_page(pool
, &oid
[i
], index
[i
]);
441 zcache_put_pool(pool
);
444 ASSERT_SENTINEL(zbpg
, ZBPG
);
445 spin_lock(&zbpg
->lock
);
446 zbud_free_raw_page(zbpg
);
/*
 * Free nr pages.  This code is funky because we want to hold the locks
 * protecting various lists for as short a time as possible, and in some
 * circumstances the list may change asynchronously when the list lock is
 * not held.  In some cases we also trylock not only to avoid waiting on a
 * page in use by another cpu, but also to avoid potential deadlock due to
 * lock inversion.
 */
457 static void zbud_evict_pages(int nr
)
459 struct zbud_page
*zbpg
;
462 /* first try freeing any pages on unused list */
464 spin_lock_bh(&zbpg_unused_list_spinlock
);
465 if (!list_empty(&zbpg_unused_list
)) {
466 /* can't walk list here, since it may change when unlocked */
467 zbpg
= list_first_entry(&zbpg_unused_list
,
468 struct zbud_page
, bud_list
);
469 list_del_init(&zbpg
->bud_list
);
470 zcache_zbpg_unused_list_count
--;
471 atomic_dec(&zcache_zbud_curr_raw_pages
);
472 spin_unlock_bh(&zbpg_unused_list_spinlock
);
473 zcache_free_page(zbpg
);
474 zcache_evicted_raw_pages
++;
477 goto retry_unused_list
;
479 spin_unlock_bh(&zbpg_unused_list_spinlock
);
481 /* now try freeing unbuddied pages, starting with least space avail */
482 for (i
= 0; i
< MAX_CHUNK
; i
++) {
484 spin_lock_bh(&zbud_budlists_spinlock
);
485 if (list_empty(&zbud_unbuddied
[i
].list
)) {
486 spin_unlock_bh(&zbud_budlists_spinlock
);
489 list_for_each_entry(zbpg
, &zbud_unbuddied
[i
].list
, bud_list
) {
490 if (unlikely(!spin_trylock(&zbpg
->lock
)))
492 list_del_init(&zbpg
->bud_list
);
493 zbud_unbuddied
[i
].count
--;
494 spin_unlock(&zbud_budlists_spinlock
);
495 zcache_evicted_unbuddied_pages
++;
496 /* want budlists unlocked when doing zbpg eviction */
497 zbud_evict_zbpg(zbpg
);
501 goto retry_unbud_list_i
;
503 spin_unlock_bh(&zbud_budlists_spinlock
);
506 /* as a last resort, free buddied pages */
508 spin_lock_bh(&zbud_budlists_spinlock
);
509 if (list_empty(&zbud_buddied_list
)) {
510 spin_unlock_bh(&zbud_budlists_spinlock
);
513 list_for_each_entry(zbpg
, &zbud_buddied_list
, bud_list
) {
514 if (unlikely(!spin_trylock(&zbpg
->lock
)))
516 list_del_init(&zbpg
->bud_list
);
517 zcache_zbud_buddied_count
--;
518 spin_unlock(&zbud_budlists_spinlock
);
519 zcache_evicted_buddied_pages
++;
520 /* want budlists unlocked when doing zbpg eviction */
521 zbud_evict_zbpg(zbpg
);
527 spin_unlock_bh(&zbud_budlists_spinlock
);
532 static void zbud_init(void)
536 INIT_LIST_HEAD(&zbud_buddied_list
);
537 zcache_zbud_buddied_count
= 0;
538 for (i
= 0; i
< NCHUNKS
; i
++) {
539 INIT_LIST_HEAD(&zbud_unbuddied
[i
].list
);
540 zbud_unbuddied
[i
].count
= 0;
/*
 * These sysfs routines show a nice distribution of how many zbpg's are
 * currently (and have ever been placed) in each unbuddied list.  It's fun
 * to watch but can probably go away before final merge.
 */
550 static int zbud_show_unbuddied_list_counts(char *buf
)
555 for (i
= 0; i
< NCHUNKS
- 1; i
++)
556 p
+= sprintf(p
, "%u ", zbud_unbuddied
[i
].count
);
557 p
+= sprintf(p
, "%d\n", zbud_unbuddied
[i
].count
);
561 static int zbud_show_cumul_chunk_counts(char *buf
)
563 unsigned long i
, chunks
= 0, total_chunks
= 0, sum_total_chunks
= 0;
564 unsigned long total_chunks_lte_21
= 0, total_chunks_lte_32
= 0;
565 unsigned long total_chunks_lte_42
= 0;
568 for (i
= 0; i
< NCHUNKS
; i
++) {
569 p
+= sprintf(p
, "%lu ", zbud_cumul_chunk_counts
[i
]);
570 chunks
+= zbud_cumul_chunk_counts
[i
];
571 total_chunks
+= zbud_cumul_chunk_counts
[i
];
572 sum_total_chunks
+= i
* zbud_cumul_chunk_counts
[i
];
574 total_chunks_lte_21
= total_chunks
;
576 total_chunks_lte_32
= total_chunks
;
578 total_chunks_lte_42
= total_chunks
;
580 p
+= sprintf(p
, "<=21:%lu <=32:%lu <=42:%lu, mean:%lu\n",
581 total_chunks_lte_21
, total_chunks_lte_32
, total_chunks_lte_42
,
582 chunks
== 0 ? 0 : sum_total_chunks
/ chunks
);
/*
 * This "zv" PAM implementation combines the TLSF-based xvMalloc
 * with lzo1x compression to maximize the amount of data that can
 * be packed into a physical page.
 *
 * Zv represents a PAM page with the index and object (plus a "size" value
 * necessary for decompression) immediately preceding the compressed data.
 */

#define ZVH_SENTINEL	0x43214321
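/*
 * Hedged sketch (the struct zv_hdr definition was lost from this fragment):
 * judging from the comment above and from zv_create()/zv_free() below, the
 * header preceding the compressed data needs at least the owning pool id,
 * the tmem object id and index, and a sentinel.  Field order and any padding
 * here are assumptions, not the original layout.
 */
struct zv_hdr {
	uint32_t pool_id;
	struct tmem_oid oid;
	uint32_t index;
	DECL_SENTINEL
};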
static const int zv_max_page_size = (PAGE_SIZE / 8) * 7;
607 static struct zv_hdr
*zv_create(struct xv_pool
*xvpool
, uint32_t pool_id
,
608 struct tmem_oid
*oid
, uint32_t index
,
609 void *cdata
, unsigned clen
)
612 struct zv_hdr
*zv
= NULL
;
616 BUG_ON(!irqs_disabled());
617 ret
= xv_malloc(xvpool
, clen
+ sizeof(struct zv_hdr
),
618 &page
, &offset
, ZCACHE_GFP_MASK
);
621 zv
= kmap_atomic(page
, KM_USER0
) + offset
;
624 zv
->pool_id
= pool_id
;
625 SET_SENTINEL(zv
, ZVH
);
626 memcpy((char *)zv
+ sizeof(struct zv_hdr
), cdata
, clen
);
627 kunmap_atomic(zv
, KM_USER0
);
632 static void zv_free(struct xv_pool
*xvpool
, struct zv_hdr
*zv
)
639 ASSERT_SENTINEL(zv
, ZVH
);
640 size
= xv_get_object_size(zv
) - sizeof(*zv
);
641 BUG_ON(size
== 0 || size
> zv_max_page_size
);
642 INVERT_SENTINEL(zv
, ZVH
);
643 page
= virt_to_page(zv
);
644 offset
= (unsigned long)zv
& ~PAGE_MASK
;
645 local_irq_save(flags
);
646 xv_free(xvpool
, page
, offset
);
647 local_irq_restore(flags
);
650 static void zv_decompress(struct page
*page
, struct zv_hdr
*zv
)
652 size_t clen
= PAGE_SIZE
;
657 ASSERT_SENTINEL(zv
, ZVH
);
658 size
= xv_get_object_size(zv
) - sizeof(*zv
);
659 BUG_ON(size
== 0 || size
> zv_max_page_size
);
660 to_va
= kmap_atomic(page
, KM_USER0
);
661 ret
= lzo1x_decompress_safe((char *)zv
+ sizeof(*zv
),
663 kunmap_atomic(to_va
, KM_USER0
);
664 BUG_ON(ret
!= LZO_E_OK
);
665 BUG_ON(clen
!= PAGE_SIZE
);
/*
 * zcache core code starts here
 */

/* useful stats not collected by cleancache or frontswap */
static unsigned long zcache_flush_total;
static unsigned long zcache_flush_found;
static unsigned long zcache_flobj_total;
static unsigned long zcache_flobj_found;
static unsigned long zcache_failed_eph_puts;
static unsigned long zcache_failed_pers_puts;
#define MAX_POOLS_PER_CLIENT 16

static struct zcache_client {
	struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
	struct xv_pool *xvpool;
} zcache_client;
/*
 * Tmem operations assume the poolid implies the invoking client.
 * Zcache only has one client (the kernel itself), so translate
 * the poolid into the tmem_pool allocated for it.  A KVM version
 * of zcache would have one client per guest and each client might
 * have multiple pools.
 */
694 static struct tmem_pool
*zcache_get_pool_by_id(uint32_t poolid
)
696 struct tmem_pool
*pool
= NULL
;
699 pool
= zcache_client
.tmem_pools
[poolid
];
701 atomic_inc(&pool
->refcount
);
706 static void zcache_put_pool(struct tmem_pool
*pool
)
709 atomic_dec(&pool
->refcount
);
/* counters for debugging */
static unsigned long zcache_failed_get_free_pages;
static unsigned long zcache_failed_alloc;
static unsigned long zcache_put_to_flush;
static unsigned long zcache_aborted_preload;
static unsigned long zcache_aborted_shrink;
/*
 * Ensure that memory allocation requests in zcache don't result
 * in direct reclaim requests via the shrinker, which would cause
 * an infinite loop.  Maybe a GFP flag would be better?
 */
static DEFINE_SPINLOCK(zcache_direct_reclaim_lock);
/*
 * for now, used named slabs so can easily track usage; later can
 * either just use kmalloc, or perhaps add a slab-like allocator
 * to more carefully manage total memory utilization
 */
static struct kmem_cache *zcache_objnode_cache;
static struct kmem_cache *zcache_obj_cache;
static atomic_t zcache_curr_obj_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_obj_count_max;
static atomic_t zcache_curr_objnode_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_objnode_count_max;
/*
 * to avoid memory allocation recursion (e.g. due to direct reclaim), we
 * preload all necessary data structures so the hostops callbacks never
 * actually do a malloc
 */
struct zcache_preload {
	void *page;
	struct tmem_obj *obj;
	int nr;
	struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
};
static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
751 static int zcache_do_preload(struct tmem_pool
*pool
)
753 struct zcache_preload
*kp
;
754 struct tmem_objnode
*objnode
;
755 struct tmem_obj
*obj
;
759 if (unlikely(zcache_objnode_cache
== NULL
))
761 if (unlikely(zcache_obj_cache
== NULL
))
763 if (!spin_trylock(&zcache_direct_reclaim_lock
)) {
764 zcache_aborted_preload
++;
768 kp
= &__get_cpu_var(zcache_preloads
);
769 while (kp
->nr
< ARRAY_SIZE(kp
->objnodes
)) {
770 preempt_enable_no_resched();
771 objnode
= kmem_cache_alloc(zcache_objnode_cache
,
773 if (unlikely(objnode
== NULL
)) {
774 zcache_failed_alloc
++;
778 kp
= &__get_cpu_var(zcache_preloads
);
779 if (kp
->nr
< ARRAY_SIZE(kp
->objnodes
))
780 kp
->objnodes
[kp
->nr
++] = objnode
;
782 kmem_cache_free(zcache_objnode_cache
, objnode
);
784 preempt_enable_no_resched();
785 obj
= kmem_cache_alloc(zcache_obj_cache
, ZCACHE_GFP_MASK
);
786 if (unlikely(obj
== NULL
)) {
787 zcache_failed_alloc
++;
790 page
= (void *)__get_free_page(ZCACHE_GFP_MASK
);
791 if (unlikely(page
== NULL
)) {
792 zcache_failed_get_free_pages
++;
793 kmem_cache_free(zcache_obj_cache
, obj
);
797 kp
= &__get_cpu_var(zcache_preloads
);
801 kmem_cache_free(zcache_obj_cache
, obj
);
802 if (kp
->page
== NULL
)
805 free_page((unsigned long)page
);
808 spin_unlock(&zcache_direct_reclaim_lock
);
static void *zcache_get_free_page(void)
{
	struct zcache_preload *kp;
	void *page;

	kp = &__get_cpu_var(zcache_preloads);
	page = kp->page;
	BUG_ON(page == NULL);
	kp->page = NULL;
	return page;
}

static void zcache_free_page(void *p)
{
	free_page((unsigned long)p);
}
/*
 * zcache implementation for tmem host ops
 */
834 static struct tmem_objnode
*zcache_objnode_alloc(struct tmem_pool
*pool
)
836 struct tmem_objnode
*objnode
= NULL
;
838 struct zcache_preload
*kp
;
840 kp
= &__get_cpu_var(zcache_preloads
);
843 objnode
= kp
->objnodes
[kp
->nr
- 1];
844 BUG_ON(objnode
== NULL
);
845 kp
->objnodes
[kp
->nr
- 1] = NULL
;
847 count
= atomic_inc_return(&zcache_curr_objnode_count
);
848 if (count
> zcache_curr_objnode_count_max
)
849 zcache_curr_objnode_count_max
= count
;
854 static void zcache_objnode_free(struct tmem_objnode
*objnode
,
855 struct tmem_pool
*pool
)
857 atomic_dec(&zcache_curr_objnode_count
);
858 BUG_ON(atomic_read(&zcache_curr_objnode_count
) < 0);
859 kmem_cache_free(zcache_objnode_cache
, objnode
);
862 static struct tmem_obj
*zcache_obj_alloc(struct tmem_pool
*pool
)
864 struct tmem_obj
*obj
= NULL
;
866 struct zcache_preload
*kp
;
868 kp
= &__get_cpu_var(zcache_preloads
);
872 count
= atomic_inc_return(&zcache_curr_obj_count
);
873 if (count
> zcache_curr_obj_count_max
)
874 zcache_curr_obj_count_max
= count
;
878 static void zcache_obj_free(struct tmem_obj
*obj
, struct tmem_pool
*pool
)
880 atomic_dec(&zcache_curr_obj_count
);
881 BUG_ON(atomic_read(&zcache_curr_obj_count
) < 0);
882 kmem_cache_free(zcache_obj_cache
, obj
);
static struct tmem_hostops zcache_hostops = {
	.obj_alloc = zcache_obj_alloc,
	.obj_free = zcache_obj_free,
	.objnode_alloc = zcache_objnode_alloc,
	.objnode_free = zcache_objnode_free,
};
/*
 * zcache implementations for PAM page descriptor ops
 */

static atomic_t zcache_curr_eph_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_eph_pampd_count_max;
static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
static unsigned long zcache_curr_pers_pampd_count_max;
/* forward reference */
static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
904 static void *zcache_pampd_create(struct tmem_pool
*pool
, struct tmem_oid
*oid
,
905 uint32_t index
, struct page
*page
)
907 void *pampd
= NULL
, *cdata
;
910 bool ephemeral
= is_ephemeral(pool
);
914 ret
= zcache_compress(page
, &cdata
, &clen
);
918 if (clen
== 0 || clen
> zbud_max_buddy_size()) {
919 zcache_compress_poor
++;
922 pampd
= (void *)zbud_create(pool
->pool_id
, oid
, index
,
925 count
= atomic_inc_return(&zcache_curr_eph_pampd_count
);
926 if (count
> zcache_curr_eph_pampd_count_max
)
927 zcache_curr_eph_pampd_count_max
= count
;
931 * FIXME: This is all the "policy" there is for now.
932 * 3/4 totpages should allow ~37% of RAM to be filled with
933 * compressed frontswap pages
935 if (atomic_read(&zcache_curr_pers_pampd_count
) >
936 3 * totalram_pages
/ 4)
938 ret
= zcache_compress(page
, &cdata
, &clen
);
941 if (clen
> zv_max_page_size
) {
942 zcache_compress_poor
++;
945 pampd
= (void *)zv_create(zcache_client
.xvpool
, pool
->pool_id
,
946 oid
, index
, cdata
, clen
);
949 count
= atomic_inc_return(&zcache_curr_pers_pampd_count
);
950 if (count
> zcache_curr_pers_pampd_count_max
)
951 zcache_curr_pers_pampd_count_max
= count
;
/*
 * fill the pageframe corresponding to the struct page with the data
 * from the passed pampd
 */
static int zcache_pampd_get_data(struct page *page, void *pampd,
					struct tmem_pool *pool)
{
	int ret = 0;

	if (is_ephemeral(pool))
		ret = zbud_decompress(page, pampd);
	else
		zv_decompress(page, pampd);
	return ret;
}
/*
 * free the pampd and remove it from any zcache lists
 * pampd must no longer be pointed to from any tmem data structures!
 */
static void zcache_pampd_free(void *pampd, struct tmem_pool *pool)
{
	if (is_ephemeral(pool)) {
		zbud_free_and_delist((struct zbud_hdr *)pampd);
		atomic_dec(&zcache_curr_eph_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_eph_pampd_count) < 0);
	} else {
		zv_free(zcache_client.xvpool, (struct zv_hdr *)pampd);
		atomic_dec(&zcache_curr_pers_pampd_count);
		BUG_ON(atomic_read(&zcache_curr_pers_pampd_count) < 0);
	}
}
static struct tmem_pamops zcache_pamops = {
	.create = zcache_pampd_create,
	.get_data = zcache_pampd_get_data,
	.free = zcache_pampd_free,
};
/*
 * zcache compression/decompression and related per-cpu stuff
 */

#define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS
#define LZO_DSTMEM_PAGE_ORDER 1
static DEFINE_PER_CPU(unsigned char *, zcache_workmem);
static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
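/*
 * Sizing note (not in the original source): lzo1x can expand incompressible
 * input, with the commonly quoted worst case of about
 * in_len + in_len/16 + 64 + 3 bytes.  For a 4KB page that is roughly 4419
 * bytes, which no longer fits in a single page, so the per-cpu destination
 * buffer above is sized at order 1 (two pages).  zcache_workmem holds the
 * LZO1X_1_MEM_COMPRESS scratch area that lzo1x_1_compress() requires.
 */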
1005 static int zcache_compress(struct page
*from
, void **out_va
, size_t *out_len
)
1008 unsigned char *dmem
= __get_cpu_var(zcache_dstmem
);
1009 unsigned char *wmem
= __get_cpu_var(zcache_workmem
);
1012 BUG_ON(!irqs_disabled());
1013 if (unlikely(dmem
== NULL
|| wmem
== NULL
))
1014 goto out
; /* no buffer, so can't compress */
1015 from_va
= kmap_atomic(from
, KM_USER0
);
1017 ret
= lzo1x_1_compress(from_va
, PAGE_SIZE
, dmem
, out_len
, wmem
);
1018 BUG_ON(ret
!= LZO_E_OK
);
1020 kunmap_atomic(from_va
, KM_USER0
);
1027 static int zcache_cpu_notifier(struct notifier_block
*nb
,
1028 unsigned long action
, void *pcpu
)
1030 int cpu
= (long)pcpu
;
1031 struct zcache_preload
*kp
;
1034 case CPU_UP_PREPARE
:
1035 per_cpu(zcache_dstmem
, cpu
) = (void *)__get_free_pages(
1036 GFP_KERNEL
| __GFP_REPEAT
,
1037 LZO_DSTMEM_PAGE_ORDER
),
1038 per_cpu(zcache_workmem
, cpu
) =
1039 kzalloc(LZO1X_MEM_COMPRESS
,
1040 GFP_KERNEL
| __GFP_REPEAT
);
1043 case CPU_UP_CANCELED
:
1044 free_pages((unsigned long)per_cpu(zcache_dstmem
, cpu
),
1045 LZO_DSTMEM_PAGE_ORDER
);
1046 per_cpu(zcache_dstmem
, cpu
) = NULL
;
1047 kfree(per_cpu(zcache_workmem
, cpu
));
1048 per_cpu(zcache_workmem
, cpu
) = NULL
;
1049 kp
= &per_cpu(zcache_preloads
, cpu
);
1051 kmem_cache_free(zcache_objnode_cache
,
1052 kp
->objnodes
[kp
->nr
- 1]);
1053 kp
->objnodes
[kp
->nr
- 1] = NULL
;
1056 kmem_cache_free(zcache_obj_cache
, kp
->obj
);
1057 free_page((unsigned long)kp
->page
);
1065 static struct notifier_block zcache_cpu_notifier_block
= {
1066 .notifier_call
= zcache_cpu_notifier
#ifdef CONFIG_SYSFS
#define ZCACHE_SYSFS_RO(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%lu\n", zcache_##_name); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

#define ZCACHE_SYSFS_RO_ATOMIC(_name) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return sprintf(buf, "%d\n", atomic_read(&zcache_##_name)); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}

#define ZCACHE_SYSFS_RO_CUSTOM(_name, _func) \
	static ssize_t zcache_##_name##_show(struct kobject *kobj, \
				struct kobj_attribute *attr, char *buf) \
	{ \
		return _func(buf); \
	} \
	static struct kobj_attribute zcache_##_name##_attr = { \
		.attr = { .name = __stringify(_name), .mode = 0444 }, \
		.show = zcache_##_name##_show, \
	}
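/*
 * For reference (not part of the original source): expanding, say,
 * ZCACHE_SYSFS_RO(compress_poor) produces a read-only attribute whose show
 * routine prints the matching counter, roughly:
 *
 *	static ssize_t zcache_compress_poor_show(struct kobject *kobj,
 *				struct kobj_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", zcache_compress_poor);
 *	}
 *	static struct kobj_attribute zcache_compress_poor_attr = {
 *		.attr = { .name = "compress_poor", .mode = 0444 },
 *		.show = zcache_compress_poor_show,
 *	};
 *
 * The attributes are grouped below and registered under /sys/kernel/mm by
 * zcache_init() via sysfs_create_group(mm_kobj, &zcache_attr_group).
 */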
1103 ZCACHE_SYSFS_RO(curr_obj_count_max
);
1104 ZCACHE_SYSFS_RO(curr_objnode_count_max
);
1105 ZCACHE_SYSFS_RO(flush_total
);
1106 ZCACHE_SYSFS_RO(flush_found
);
1107 ZCACHE_SYSFS_RO(flobj_total
);
1108 ZCACHE_SYSFS_RO(flobj_found
);
1109 ZCACHE_SYSFS_RO(failed_eph_puts
);
1110 ZCACHE_SYSFS_RO(failed_pers_puts
);
1111 ZCACHE_SYSFS_RO(zbud_curr_zbytes
);
1112 ZCACHE_SYSFS_RO(zbud_cumul_zpages
);
1113 ZCACHE_SYSFS_RO(zbud_cumul_zbytes
);
1114 ZCACHE_SYSFS_RO(zbud_buddied_count
);
1115 ZCACHE_SYSFS_RO(zbpg_unused_list_count
);
1116 ZCACHE_SYSFS_RO(evicted_raw_pages
);
1117 ZCACHE_SYSFS_RO(evicted_unbuddied_pages
);
1118 ZCACHE_SYSFS_RO(evicted_buddied_pages
);
1119 ZCACHE_SYSFS_RO(failed_get_free_pages
);
1120 ZCACHE_SYSFS_RO(failed_alloc
);
1121 ZCACHE_SYSFS_RO(put_to_flush
);
1122 ZCACHE_SYSFS_RO(aborted_preload
);
1123 ZCACHE_SYSFS_RO(aborted_shrink
);
1124 ZCACHE_SYSFS_RO(compress_poor
);
1125 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_raw_pages
);
1126 ZCACHE_SYSFS_RO_ATOMIC(zbud_curr_zpages
);
1127 ZCACHE_SYSFS_RO_ATOMIC(curr_obj_count
);
1128 ZCACHE_SYSFS_RO_ATOMIC(curr_objnode_count
);
1129 ZCACHE_SYSFS_RO_CUSTOM(zbud_unbuddied_list_counts
,
1130 zbud_show_unbuddied_list_counts
);
1131 ZCACHE_SYSFS_RO_CUSTOM(zbud_cumul_chunk_counts
,
1132 zbud_show_cumul_chunk_counts
);
1134 static struct attribute
*zcache_attrs
[] = {
1135 &zcache_curr_obj_count_attr
.attr
,
1136 &zcache_curr_obj_count_max_attr
.attr
,
1137 &zcache_curr_objnode_count_attr
.attr
,
1138 &zcache_curr_objnode_count_max_attr
.attr
,
1139 &zcache_flush_total_attr
.attr
,
1140 &zcache_flobj_total_attr
.attr
,
1141 &zcache_flush_found_attr
.attr
,
1142 &zcache_flobj_found_attr
.attr
,
1143 &zcache_failed_eph_puts_attr
.attr
,
1144 &zcache_failed_pers_puts_attr
.attr
,
1145 &zcache_compress_poor_attr
.attr
,
1146 &zcache_zbud_curr_raw_pages_attr
.attr
,
1147 &zcache_zbud_curr_zpages_attr
.attr
,
1148 &zcache_zbud_curr_zbytes_attr
.attr
,
1149 &zcache_zbud_cumul_zpages_attr
.attr
,
1150 &zcache_zbud_cumul_zbytes_attr
.attr
,
1151 &zcache_zbud_buddied_count_attr
.attr
,
1152 &zcache_zbpg_unused_list_count_attr
.attr
,
1153 &zcache_evicted_raw_pages_attr
.attr
,
1154 &zcache_evicted_unbuddied_pages_attr
.attr
,
1155 &zcache_evicted_buddied_pages_attr
.attr
,
1156 &zcache_failed_get_free_pages_attr
.attr
,
1157 &zcache_failed_alloc_attr
.attr
,
1158 &zcache_put_to_flush_attr
.attr
,
1159 &zcache_aborted_preload_attr
.attr
,
1160 &zcache_aborted_shrink_attr
.attr
,
1161 &zcache_zbud_unbuddied_list_counts_attr
.attr
,
1162 &zcache_zbud_cumul_chunk_counts_attr
.attr
,
1166 static struct attribute_group zcache_attr_group
= {
1167 .attrs
= zcache_attrs
,
1171 #endif /* CONFIG_SYSFS */
/*
 * When zcache is disabled ("frozen"), pools can be created and destroyed,
 * but all puts (and thus all other operations that require memory allocation)
 * must fail.  If zcache is unfrozen, accepts puts, then frozen again,
 * data consistency requires all puts while frozen to be converted into
 * flushes.
 */
static bool zcache_freeze;
/*
 * zcache shrinker interface (only useful for ephemeral pages, so zbud only)
 */
1184 static int shrink_zcache_memory(struct shrinker
*shrink
, int nr
, gfp_t gfp_mask
)
1189 if (!(gfp_mask
& __GFP_FS
))
1190 /* does this case really need to be skipped? */
1192 if (spin_trylock(&zcache_direct_reclaim_lock
)) {
1193 zbud_evict_pages(nr
);
1194 spin_unlock(&zcache_direct_reclaim_lock
);
1196 zcache_aborted_shrink
++;
1198 ret
= (int)atomic_read(&zcache_zbud_curr_raw_pages
);
static struct shrinker zcache_shrinker = {
	.shrink = shrink_zcache_memory,
	.seeks = DEFAULT_SEEKS,
};
/*
 * zcache shims between cleancache/frontswap ops and tmem
 */
1212 static int zcache_put_page(int pool_id
, struct tmem_oid
*oidp
,
1213 uint32_t index
, struct page
*page
)
1215 struct tmem_pool
*pool
;
1218 BUG_ON(!irqs_disabled());
1219 pool
= zcache_get_pool_by_id(pool_id
);
1220 if (unlikely(pool
== NULL
))
1222 if (!zcache_freeze
&& zcache_do_preload(pool
) == 0) {
1223 /* preload does preempt_disable on success */
1224 ret
= tmem_put(pool
, oidp
, index
, page
);
1226 if (is_ephemeral(pool
))
1227 zcache_failed_eph_puts
++;
1229 zcache_failed_pers_puts
++;
1231 zcache_put_pool(pool
);
1232 preempt_enable_no_resched();
1234 zcache_put_to_flush
++;
1235 if (atomic_read(&pool
->obj_count
) > 0)
1236 /* the put fails whether the flush succeeds or not */
1237 (void)tmem_flush_page(pool
, oidp
, index
);
1238 zcache_put_pool(pool
);
1244 static int zcache_get_page(int pool_id
, struct tmem_oid
*oidp
,
1245 uint32_t index
, struct page
*page
)
1247 struct tmem_pool
*pool
;
1249 unsigned long flags
;
1251 local_irq_save(flags
);
1252 pool
= zcache_get_pool_by_id(pool_id
);
1253 if (likely(pool
!= NULL
)) {
1254 if (atomic_read(&pool
->obj_count
) > 0)
1255 ret
= tmem_get(pool
, oidp
, index
, page
);
1256 zcache_put_pool(pool
);
1258 local_irq_restore(flags
);
1262 static int zcache_flush_page(int pool_id
, struct tmem_oid
*oidp
, uint32_t index
)
1264 struct tmem_pool
*pool
;
1266 unsigned long flags
;
1268 local_irq_save(flags
);
1269 zcache_flush_total
++;
1270 pool
= zcache_get_pool_by_id(pool_id
);
1271 if (likely(pool
!= NULL
)) {
1272 if (atomic_read(&pool
->obj_count
) > 0)
1273 ret
= tmem_flush_page(pool
, oidp
, index
);
1274 zcache_put_pool(pool
);
1277 zcache_flush_found
++;
1278 local_irq_restore(flags
);
1282 static int zcache_flush_object(int pool_id
, struct tmem_oid
*oidp
)
1284 struct tmem_pool
*pool
;
1286 unsigned long flags
;
1288 local_irq_save(flags
);
1289 zcache_flobj_total
++;
1290 pool
= zcache_get_pool_by_id(pool_id
);
1291 if (likely(pool
!= NULL
)) {
1292 if (atomic_read(&pool
->obj_count
) > 0)
1293 ret
= tmem_flush_object(pool
, oidp
);
1294 zcache_put_pool(pool
);
1297 zcache_flobj_found
++;
1298 local_irq_restore(flags
);
1302 static int zcache_destroy_pool(int pool_id
)
1304 struct tmem_pool
*pool
= NULL
;
1309 pool
= zcache_client
.tmem_pools
[pool_id
];
1312 zcache_client
.tmem_pools
[pool_id
] = NULL
;
1313 /* wait for pool activity on other cpus to quiesce */
1314 while (atomic_read(&pool
->refcount
) != 0)
1317 ret
= tmem_destroy_pool(pool
);
1320 pr_info("zcache: destroyed pool id=%d\n", pool_id
);
1325 static int zcache_new_pool(uint32_t flags
)
1328 struct tmem_pool
*pool
;
1330 pool
= kmalloc(sizeof(struct tmem_pool
), GFP_KERNEL
);
1332 pr_info("zcache: pool creation failed: out of memory\n");
1336 for (poolid
= 0; poolid
< MAX_POOLS_PER_CLIENT
; poolid
++)
1337 if (zcache_client
.tmem_pools
[poolid
] == NULL
)
1339 if (poolid
>= MAX_POOLS_PER_CLIENT
) {
1340 pr_info("zcache: pool creation failed: max exceeded\n");
1345 atomic_set(&pool
->refcount
, 0);
1346 pool
->client
= &zcache_client
;
1347 pool
->pool_id
= poolid
;
1348 tmem_new_pool(pool
, flags
);
1349 zcache_client
.tmem_pools
[poolid
] = pool
;
1350 pr_info("zcache: created %s tmem pool, id=%d\n",
1351 flags
& TMEM_POOL_PERSIST
? "persistent" : "ephemeral",
/*
 * Two kernel functionalities currently can be layered on top of tmem.
 * These are "cleancache" which is used as a second-chance cache for clean
 * page cache pages; and "frontswap" which is used for swap pages
 * to avoid writes to disk.  A generic "shim" is provided here for each
 * to translate in-kernel semantics to zcache semantics.
 */

#ifdef CONFIG_CLEANCACHE
1366 static void zcache_cleancache_put_page(int pool_id
,
1367 struct cleancache_filekey key
,
1368 pgoff_t index
, struct page
*page
)
1370 u32 ind
= (u32
) index
;
1371 struct tmem_oid oid
= *(struct tmem_oid
*)&key
;
1373 if (likely(ind
== index
))
1374 (void)zcache_put_page(pool_id
, &oid
, index
, page
);
1377 static int zcache_cleancache_get_page(int pool_id
,
1378 struct cleancache_filekey key
,
1379 pgoff_t index
, struct page
*page
)
1381 u32 ind
= (u32
) index
;
1382 struct tmem_oid oid
= *(struct tmem_oid
*)&key
;
1385 if (likely(ind
== index
))
1386 ret
= zcache_get_page(pool_id
, &oid
, index
, page
);
1390 static void zcache_cleancache_flush_page(int pool_id
,
1391 struct cleancache_filekey key
,
1394 u32 ind
= (u32
) index
;
1395 struct tmem_oid oid
= *(struct tmem_oid
*)&key
;
1397 if (likely(ind
== index
))
1398 (void)zcache_flush_page(pool_id
, &oid
, ind
);
1401 static void zcache_cleancache_flush_inode(int pool_id
,
1402 struct cleancache_filekey key
)
1404 struct tmem_oid oid
= *(struct tmem_oid
*)&key
;
1406 (void)zcache_flush_object(pool_id
, &oid
);
1409 static void zcache_cleancache_flush_fs(int pool_id
)
1412 (void)zcache_destroy_pool(pool_id
);
1415 static int zcache_cleancache_init_fs(size_t pagesize
)
1417 BUG_ON(sizeof(struct cleancache_filekey
) !=
1418 sizeof(struct tmem_oid
));
1419 BUG_ON(pagesize
!= PAGE_SIZE
);
1420 return zcache_new_pool(0);
1423 static int zcache_cleancache_init_shared_fs(char *uuid
, size_t pagesize
)
1425 /* shared pools are unsupported and map to private */
1426 BUG_ON(sizeof(struct cleancache_filekey
) !=
1427 sizeof(struct tmem_oid
));
1428 BUG_ON(pagesize
!= PAGE_SIZE
);
1429 return zcache_new_pool(0);
static struct cleancache_ops zcache_cleancache_ops = {
	.put_page = zcache_cleancache_put_page,
	.get_page = zcache_cleancache_get_page,
	.flush_page = zcache_cleancache_flush_page,
	.flush_inode = zcache_cleancache_flush_inode,
	.flush_fs = zcache_cleancache_flush_fs,
	.init_shared_fs = zcache_cleancache_init_shared_fs,
	.init_fs = zcache_cleancache_init_fs
};

struct cleancache_ops zcache_cleancache_register_ops(void)
{
	struct cleancache_ops old_ops =
		cleancache_register_ops(&zcache_cleancache_ops);

	return old_ops;
}
#endif
#ifdef CONFIG_FRONTSWAP
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int zcache_frontswap_poolid = -1;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)
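/*
 * Illustrative example (assumes a small SWIZ_BITS, e.g. 4; its definition is
 * not shown in this fragment): for swap type 2 and page offset 0x1235,
 * _oswiz(2, 0x1235) folds the type and the low SWIZ_BITS of the offset into
 * one object id, (2 << 4) | 5 = 0x25, while iswiz(0x1235) = 0x123 becomes
 * the index within that object.  Consecutive offsets thus land in different
 * tmem objects, spreading lock contention under heavy swap.
 */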
1464 static inline struct tmem_oid
oswiz(unsigned type
, u32 ind
)
1466 struct tmem_oid oid
= { .oid
= { 0 } };
1467 oid
.oid
[0] = _oswiz(type
, ind
);
1471 static int zcache_frontswap_put_page(unsigned type
, pgoff_t offset
,
1474 u64 ind64
= (u64
)offset
;
1475 u32 ind
= (u32
)offset
;
1476 struct tmem_oid oid
= oswiz(type
, ind
);
1478 unsigned long flags
;
1480 BUG_ON(!PageLocked(page
));
1481 if (likely(ind64
== ind
)) {
1482 local_irq_save(flags
);
1483 ret
= zcache_put_page(zcache_frontswap_poolid
, &oid
,
1485 local_irq_restore(flags
);
1490 /* returns 0 if the page was successfully gotten from frontswap, -1 if
1491 * was not present (should never happen!) */
1492 static int zcache_frontswap_get_page(unsigned type
, pgoff_t offset
,
1495 u64 ind64
= (u64
)offset
;
1496 u32 ind
= (u32
)offset
;
1497 struct tmem_oid oid
= oswiz(type
, ind
);
1500 BUG_ON(!PageLocked(page
));
1501 if (likely(ind64
== ind
))
1502 ret
= zcache_get_page(zcache_frontswap_poolid
, &oid
,
1507 /* flush a single page from frontswap */
1508 static void zcache_frontswap_flush_page(unsigned type
, pgoff_t offset
)
1510 u64 ind64
= (u64
)offset
;
1511 u32 ind
= (u32
)offset
;
1512 struct tmem_oid oid
= oswiz(type
, ind
);
1514 if (likely(ind64
== ind
))
1515 (void)zcache_flush_page(zcache_frontswap_poolid
, &oid
,
1519 /* flush all pages from the passed swaptype */
1520 static void zcache_frontswap_flush_area(unsigned type
)
1522 struct tmem_oid oid
;
1525 for (ind
= SWIZ_MASK
; ind
>= 0; ind
--) {
1526 oid
= oswiz(type
, ind
);
1527 (void)zcache_flush_object(zcache_frontswap_poolid
, &oid
);
1531 static void zcache_frontswap_init(unsigned ignored
)
1533 /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
1534 if (zcache_frontswap_poolid
< 0)
1535 zcache_frontswap_poolid
= zcache_new_pool(TMEM_POOL_PERSIST
);
static struct frontswap_ops zcache_frontswap_ops = {
	.put_page = zcache_frontswap_put_page,
	.get_page = zcache_frontswap_get_page,
	.flush_page = zcache_frontswap_flush_page,
	.flush_area = zcache_frontswap_flush_area,
	.init = zcache_frontswap_init
};
struct frontswap_ops zcache_frontswap_register_ops(void)
{
	struct frontswap_ops old_ops =
		frontswap_register_ops(&zcache_frontswap_ops);

	return old_ops;
}
#endif
/*
 * zcache initialization
 * NOTE FOR NOW zcache MUST BE PROVIDED AS A KERNEL BOOT PARAMETER OR
 * NOTHING HAPPENS!
 */

static int zcache_enabled;

static int __init enable_zcache(char *s)
{
	zcache_enabled = 1;
	return 1;
}
__setup("zcache", enable_zcache);
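/*
 * Usage note (not in the original source): because of the __setup hook
 * above, zcache only activates when the kernel is booted with "zcache" on
 * the command line, e.g.
 *
 *	linux ... zcache
 *
 * while "nocleancache" / "nofrontswap" below disable the individual
 * frontends without disabling zcache itself.
 */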
/* allow independent dynamic disabling of cleancache and frontswap */

static int use_cleancache = 1;

static int __init no_cleancache(char *s)
{
	use_cleancache = 0;
	return 1;
}
__setup("nocleancache", no_cleancache);

static int use_frontswap = 1;

static int __init no_frontswap(char *s)
{
	use_frontswap = 0;
	return 1;
}
__setup("nofrontswap", no_frontswap);
1592 static int __init
zcache_init(void)
1597 ret
= sysfs_create_group(mm_kobj
, &zcache_attr_group
);
1599 pr_err("zcache: can't create sysfs\n");
1602 #endif /* CONFIG_SYSFS */
1603 #if defined(CONFIG_CLEANCACHE) || defined(CONFIG_FRONTSWAP)
1604 if (zcache_enabled
) {
1607 tmem_register_hostops(&zcache_hostops
);
1608 tmem_register_pamops(&zcache_pamops
);
1609 ret
= register_cpu_notifier(&zcache_cpu_notifier_block
);
1611 pr_err("zcache: can't register cpu notifier\n");
1614 for_each_online_cpu(cpu
) {
1615 void *pcpu
= (void *)(long)cpu
;
1616 zcache_cpu_notifier(&zcache_cpu_notifier_block
,
1617 CPU_UP_PREPARE
, pcpu
);
1620 zcache_objnode_cache
= kmem_cache_create("zcache_objnode",
1621 sizeof(struct tmem_objnode
), 0, 0, NULL
);
1622 zcache_obj_cache
= kmem_cache_create("zcache_obj",
1623 sizeof(struct tmem_obj
), 0, 0, NULL
);
1625 #ifdef CONFIG_CLEANCACHE
1626 if (zcache_enabled
&& use_cleancache
) {
1627 struct cleancache_ops old_ops
;
1630 register_shrinker(&zcache_shrinker
);
1631 old_ops
= zcache_cleancache_register_ops();
1632 pr_info("zcache: cleancache enabled using kernel "
1633 "transcendent memory and compression buddies\n");
1634 if (old_ops
.init_fs
!= NULL
)
1635 pr_warning("zcache: cleancache_ops overridden");
1638 #ifdef CONFIG_FRONTSWAP
1639 if (zcache_enabled
&& use_frontswap
) {
1640 struct frontswap_ops old_ops
;
1642 zcache_client
.xvpool
= xv_create_pool();
1643 if (zcache_client
.xvpool
== NULL
) {
1644 pr_err("zcache: can't create xvpool\n");
1647 old_ops
= zcache_frontswap_register_ops();
1648 pr_info("zcache: frontswap enabled using kernel "
1649 "transcendent memory and xvmalloc\n");
1650 if (old_ops
.init
!= NULL
)
1651 pr_warning("ktmem: frontswap_ops overridden");
module_init(zcache_init)