/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
/* temporary ifdef until include/linux/frontswap.h is upstream */
#ifdef CONFIG_FRONTSWAP
#include <linux/frontswap.h>
#endif
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>
#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
#define TMEM_SPEC_VERSION          1
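/*
 * Illustrative example: xen_tmem_new_pool() below packs these bits into the
 * TMEM_NEW_POOL flags argument.  For an ordinary private pool of 4 KiB pages
 * (pageshift == 12) the encoding reduces to:
 *
 *   flags  = 0;                                        caller-supplied flags
 *   flags |= (12 - 12) << TMEM_POOL_PAGESIZE_SHIFT;    page-size field == 0
 *   flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;  spec version in the
 *                                                      top byte
 *
 * A shared cleancache pool passes TMEM_POOL_SHARED and the frontswap pool
 * passes TMEM_POOL_PERSIST as the caller-supplied flags.
 */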
struct tmem_pool_uuid {
        u64 uuid_lo;
        u64 uuid_hi;
};

struct tmem_oid {
        u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID  { 0, 0 }
/* xen tmem foundation ops/hypercalls */
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
                              u32 index, unsigned long gmfn, u32 tmem_offset,
                              u32 pfn_offset, u32 len)
{
        struct tmem_op op;
        int rc = 0;

        op.cmd = tmem_cmd;
        op.pool_id = tmem_pool;
        op.u.gen.oid[0] = oid.oid[0];
        op.u.gen.oid[1] = oid.oid[1];
        op.u.gen.oid[2] = oid.oid[2];
        op.u.gen.index = index;
        op.u.gen.tmem_offset = tmem_offset;
        op.u.gen.pfn_offset = pfn_offset;
        op.u.gen.len = len;
        set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}
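/*
 * Illustrative usage of the wrapper above (mirrors the helpers below):
 * storing one whole guest page into a pool is a single hypercall, e.g.
 *
 *   xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, gmfn, 0, 0, 0);
 *
 * where gmfn is the guest frame number of the page (pfn_to_mfn(pfn) for a
 * PV domain, the raw pfn otherwise); tmem_offset, pfn_offset and len are
 * not needed for whole-page operations, so the helpers below pass 0.
 */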
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
                             u32 flags, unsigned long pagesize)
{
        struct tmem_op op;
        int rc = 0, pageshift;

        for (pageshift = 0; pagesize != 1; pageshift++)
                pagesize >>= 1;
        flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
        flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
        op.cmd = TMEM_NEW_POOL;
        op.u.new.uuid[0] = uuid.uuid_lo;
        op.u.new.uuid[1] = uuid.uuid_hi;
        op.u.new.flags = flags;
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}
/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, unsigned long pfn)
{
        unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

        return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
                           gmfn, 0, 0, 0);
}
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, unsigned long pfn)
{
        unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

        return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
                           gmfn, 0, 0, 0);
}
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
        return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
                           0, 0, 0, 0);
}
static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
        tmem_enabled = true;
        return 1;
}
__setup("tmem", enable_tmem);
#endif
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
        struct tmem_oid oid = { { 0 } };

        return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
                                     pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);

        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        unsigned long pfn = page_to_pfn(page);
        int ret;

        /* translate return values to linux semantics */
        if (pool < 0)
                return -1;
        if (ind != index)
                return -1;
        ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
        if (ret == 1)
                return 0;
        else
                return -1;
}
static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
                                       pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        (void)xen_tmem_flush_page((u32)pool, oid, ind);
}
static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        (void)xen_tmem_flush_object((u32)pool, oid);
}
static void tmem_cleancache_flush_fs(int pool)
{
        if (pool < 0)
                return;
        (void)xen_tmem_destroy_pool((u32)pool);
}
static int tmem_cleancache_init_fs(size_t pagesize)
{
        struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

        return xen_tmem_new_pool(uuid_private, 0, pagesize);
}
static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
        struct tmem_pool_uuid shared_uuid;

        shared_uuid.uuid_lo = *(u64 *)uuid;
        shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
static bool disable_cleancache __read_mostly;
static bool disable_selfballooning __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_cleancache, bool, S_IRUGO);
module_param(disable_selfballooning, bool, S_IRUGO);
#else
static int __init no_cleancache(char *s)
{
        disable_cleancache = true;
        return 1;
}
__setup("nocleancache", no_cleancache);
#endif
static struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
        .invalidate_inode = tmem_cleancache_flush_inode,
        .invalidate_fs = tmem_cleancache_flush_fs,
        .init_shared_fs = tmem_cleancache_init_shared_fs,
        .init_fs = tmem_cleancache_init_fs
};
#endif /* CONFIG_CLEANCACHE */
#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS               4
#define SWIZ_MASK               ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)     ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)             (_ind >> SWIZ_BITS)
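/*
 * Worked example (illustrative, with SWIZ_BITS == 4): a page at swap offset
 * 0x1234 on swap device ("type") 1 is stored under
 *
 *   oid.oid[0] = _oswiz(1, 0x1234) = (1 << 4) | (0x1234 & 0xf) = 0x14
 *   index      = iswiz(0x1234)     =  0x1234 >> 4              = 0x123
 *
 * so consecutive swap offsets are spread across 16 tmem objects per swap
 * device, which is what provides the extra concurrency mentioned above.
 */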
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };

        oid.oid[0] = _oswiz(type, ind);
        return oid;
}
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}
/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if it
 * was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                               struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}
/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;

        if (pool < 0)
                return;
        if (ind64 != ind)
                return;
        (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
        int pool = tmem_frontswap_poolid;
        int ind;

        if (pool < 0)
                return;
        for (ind = SWIZ_MASK; ind >= 0; ind--)
                (void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
static void tmem_frontswap_init(unsigned ignored)
{
        struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (tmem_frontswap_poolid < 0)
                tmem_frontswap_poolid =
                        xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
static bool disable_frontswap __read_mostly;
static bool disable_frontswap_selfshrinking __read_mostly;
#ifdef CONFIG_XEN_TMEM_MODULE
module_param(disable_frontswap, bool, S_IRUGO);
module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
#else
static int __init no_frontswap(char *s)
{
        disable_frontswap = true;
        return 1;
}
__setup("nofrontswap", no_frontswap);
#endif
static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
};
#else /* CONFIG_FRONTSWAP */
#define disable_frontswap_selfshrinking 1
#endif /* CONFIG_FRONTSWAP */
static int xen_tmem_init(void)
{
        if (!xen_domain())
                return 0;
#ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && !disable_frontswap) {
                char *s = "";
                struct frontswap_ops *old_ops =
                        frontswap_register_ops(&tmem_frontswap_ops);

                tmem_frontswap_poolid = -1;
                if (IS_ERR(old_ops) || old_ops) {
                        if (IS_ERR(old_ops))
                                return PTR_ERR(old_ops);
                        s = " (WARNING: frontswap_ops overridden)";
                }
                printk(KERN_INFO "frontswap enabled, RAM provided by "
                       "Xen Transcendent Memory%s\n", s);
        }
#endif
#ifdef CONFIG_CLEANCACHE
        BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && !disable_cleancache) {
                char *s = "";
                struct cleancache_ops *old_ops =
                        cleancache_register_ops(&tmem_cleancache_ops);

                if (old_ops)
                        s = " (WARNING: cleancache_ops overridden)";
                printk(KERN_INFO "cleancache enabled, RAM provided by "
                       "Xen Transcendent Memory%s\n", s);
        }
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
        xen_selfballoon_init(!disable_selfballooning,
                             !disable_frontswap_selfshrinking);
#endif
        return 0;
}
module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");