/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree those pages whose hash
 *    value has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @page: pointer to struct page of the ksm page
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 */
struct stable_node {
	struct page *page;
	struct rb_node node;
	struct hlist_head hlist;
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

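/*
 * rmap_item->address is page-aligned, so its low bits are free to carry
 * state.  For example, a page inserted into the unstable tree during full
 * scan number 5 has address == (va & PAGE_MASK) | UNSTABLE_FLAG |
 * (5 & SEQNR_MASK).
 */
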
/* The stable and unstable tree heads */
static struct rb_root root_stable_tree = RB_ROOT;
static struct rb_root root_unstable_tree = RB_ROOT;

#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash;

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Limit on the number of unswappable pages used */
static unsigned long ksm_max_kernel_pages;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

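/*
 * These tunables, together with the run mode below, are exported through
 * the sysfs control interface under /sys/kernel/mm/ksm/ (pages_to_scan,
 * sleep_millisecs, run, and friends): writing those files is how ksmd is
 * normally configured and started.
 */
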
#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static int __init mm_slots_hash_init(void)
{
	mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
				GFP_KERNEL);
	if (!mm_slots_hash)
		return -ENOMEM;
	return 0;
}

static void __init mm_slots_hash_free(void)
{
	kfree(mm_slots_hash);
}

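/*
 * get_mm_slot() and insert_to_mm_slots_hash() below hash an mm_struct by
 * its address: dividing the pointer by sizeof(struct mm_struct) maps
 * consecutively allocated mm_structs to consecutive integers, which the
 * modulo then spreads evenly across the MM_SLOTS_HASH_HEADS buckets.
 */
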
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	struct hlist_head *bucket;
	struct hlist_node *node;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	hlist_for_each_entry(mm_slot, node, bucket, link) {
		if (mm == mm_slot->mm)
			return mm_slot;
	}
	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	struct hlist_head *bucket;

	bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
				% MM_SLOTS_HASH_HEADS];
	mm_slot->mm = mm;
	hlist_add_head(&mm_slot->link, bucket);
}

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

static void hold_anon_vma(struct rmap_item *rmap_item,
			  struct anon_vma *anon_vma)
{
	rmap_item->anon_vma = anon_vma;
	atomic_inc(&anon_vma->ksm_refcount);
}

static void drop_anon_vma(struct rmap_item *rmap_item)
{
	struct anon_vma *anon_vma = rmap_item->anon_vma;

	if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;
	break_ksm(vma, addr);
out:
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = stable_node->page;
		lock_page(page);

		hlist_del(&rmap_item->hlist);
		if (stable_node->hlist.first) {
			unlock_page(page);
			ksm_pages_sharing--;
		} else {
			set_page_stable_node(page, NULL);
			unlock_page(page);
			put_page(page);

			rb_erase(&stable_node->node, &root_stable_tree);
			free_stable_node(stable_node);
			ksm_pages_shared--;
		}

		drop_anon_vma(rmap_item);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node, &root_unstable_tree);

		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}

	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page, KM_USER0);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr, KM_USER0);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1, KM_USER0);
	addr2 = kmap_atomic(page2, KM_USER1);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2, KM_USER1);
	kunmap_atomic(addr1, KM_USER0);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out;

	if (pte_write(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky: get_user_pages_fast() doesn't take
		 * any lock when it runs, so the check we are about to make,
		 * comparing the page count against the map count, is racy -
		 * O_DIRECT can happen right after the check.  So we clear
		 * the pte and flush the tlb before the check: this assures
		 * us that no O_DIRECT can start after the check or in the
		 * middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on
		 * the page.
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at_notify(mm, addr, ptep, entry);
			goto out_unlock;
		}
		entry = pte_wrprotect(entry);
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return err;
}
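
/*
 * A concrete reading of the count check in write_protect_page() (an
 * explanatory note, not part of the original source): an anonymous page
 * mapped by N ptes contributes N to both page_mapcount() and page_count();
 * our caller's FOLL_GET reference adds 1 more, and the swap cache, if the
 * page is in it, another 1.  Any surplus reference beyond
 * mapcount + 1 + swapped means someone else - O_DIRECT via
 * get_user_pages_fast(), say - still holds the page, so we back out and
 * restore the pte rather than merge.
 */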

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0 &&
	    pages_identical(page, kpage))
		err = replace_page(vma, page, kpage, orig_pte);

	if ((vma->vm_flags & VM_LOCKED) && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lru_add_drain();
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function allocates a new kernel page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	struct page *kpage;
	int err = -EFAULT;

	/*
	 * The number of nodes in the stable tree
	 * is the number of kernel pages that we hold.
	 */
	if (ksm_max_kernel_pages &&
	    ksm_max_kernel_pages <= ksm_pages_shared)
		return NULL;

	kpage = alloc_page(GFP_HIGHUSER);
	if (!kpage)
		return NULL;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto up;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto up;

	copy_user_highpage(kpage, page, rmap_item->address, vma);

	SetPageDirty(kpage);
	__SetPageUptodate(kpage);
	SetPageSwapBacked(kpage);
	set_page_stable_node(kpage, NULL);	/* mark it PageKsm */
	lru_cache_add_lru(kpage, LRU_ACTIVE_ANON);

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto up;

	/* Must get reference to anon_vma while still holding mmap_sem */
	hold_anon_vma(rmap_item, vma->anon_vma);
up:
	up_read(&mm->mmap_sem);

	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
						 tree_page, kpage);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err) {
			drop_anon_vma(rmap_item);
			break_cow(rmap_item);
		}
	}
	if (err) {
		put_page(kpage);
		kpage = NULL;
	}
	return kpage;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_search(struct page *page)
{
	struct rb_node *node = root_stable_tree.rb_node;
	struct stable_node *stable_node;

	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return stable_node;
	}

	while (node) {
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);

		ret = memcmp_pages(page, stable_node->page);

		if (ret < 0)
			node = node->rb_left;
		else if (ret > 0)
			node = node->rb_right;
		else {
			get_page(stable_node->page);
			return stable_node;
		}
	}

	return NULL;
}

/*
 * stable_tree_insert - insert rmap_item pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	struct rb_node **new = &root_stable_tree.rb_node;
	struct rb_node *parent = NULL;
	struct stable_node *stable_node;

	while (*new) {
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);

		ret = memcmp_pages(kpage, stable_node->page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree);

	INIT_HLIST_HEAD(&stable_node->hlist);

	get_page(kpage);
	stable_node->page = kpage;
	set_page_stable_node(kpage, stable_node);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns a pointer to the rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new = &root_unstable_tree.rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, &root_unstable_tree);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	stable_node = stable_tree_search(page);
	if (stable_node) {
		kpage = stable_node->page;
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, stable_node);
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * A ksm page might have got here by fork, but its other
	 * references have already been removed from the stable tree.
	 * Or it might be left over from a break_ksm which failed
	 * when the mem_cgroup had reached its limit: try again now.
	 */
	if (PageKsm(page))
		break_cow(rmap_item);

	/*
	 * If the hash value of the page has changed since the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it into the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
		if (kpage) {
			remove_rmap_item_from_tree(tree_rmap_item);

			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item, stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);
			put_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				drop_anon_vma(tree_rmap_item);
				break_cow(tree_rmap_item);
				drop_anon_vma(rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}

static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
					    struct rmap_item **rmap_list,
					    unsigned long addr)
{
	struct rmap_item *rmap_item;

	while (*rmap_list) {
		rmap_item = *rmap_list;
		if ((rmap_item->address & PAGE_MASK) == addr)
			return rmap_item;
		if (rmap_item->address > addr)
			break;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}

	rmap_item = alloc_rmap_item();
	if (rmap_item) {
		/* It has already been zeroed */
		rmap_item->mm = mm_slot->mm;
		rmap_item->address = addr;
		rmap_item->rmap_list = *rmap_list;
		*rmap_list = rmap_item;
	}
	return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
	struct mm_struct *mm;
	struct mm_slot *slot;
	struct vm_area_struct *vma;
	struct rmap_item *rmap_item;

	if (list_empty(&ksm_mm_head.mm_list))
		return NULL;

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
		slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
		ksm_scan.mm_slot = slot;
		spin_unlock(&ksm_mmlist_lock);
next_mm:
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}

	mm = slot->mm;
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		vma = NULL;
	else
		vma = find_vma(mm, ksm_scan.address);

	for (; vma; vma = vma->vm_next) {
		if (!(vma->vm_flags & VM_MERGEABLE))
			continue;
		if (ksm_scan.address < vma->vm_start)
			ksm_scan.address = vma->vm_start;
		if (!vma->anon_vma)
			ksm_scan.address = vma->vm_end;

		while (ksm_scan.address < vma->vm_end) {
			if (ksm_test_exit(mm))
				break;
			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
			if (*page && PageAnon(*page)) {
				flush_anon_page(vma, *page, ksm_scan.address);
				flush_dcache_page(*page);
				rmap_item = get_next_rmap_item(slot,
					ksm_scan.rmap_list, ksm_scan.address);
				if (rmap_item) {
					ksm_scan.rmap_list =
							&rmap_item->rmap_list;
					ksm_scan.address += PAGE_SIZE;
				} else
					put_page(*page);
				up_read(&mm->mmap_sem);
				return rmap_item;
			}
			if (*page)
				put_page(*page);
			ksm_scan.address += PAGE_SIZE;
			cond_resched();
		}
	}

	if (ksm_test_exit(mm)) {
		ksm_scan.address = 0;
		ksm_scan.rmap_list = &slot->rmap_list;
	}
	/*
	 * Nuke all the rmap_items that are above this current rmap:
	 * because there were no VM_MERGEABLE vmas with such addresses.
	 */
	remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(slot->mm_list.next,
						struct mm_slot, mm_list);
	if (ksm_scan.address == 0) {
		/*
		 * We've completed a full scan of all vmas, holding mmap_sem
		 * throughout, and found no VM_MERGEABLE: so do the same as
		 * __ksm_exit does to remove this mm from all our lists now.
		 * This applies either when cleaning up after __ksm_exit
		 * (but beware: we can reach here even before __ksm_exit),
		 * or when all VM_MERGEABLE areas have been unmapped (and
		 * mmap_sem then protects against race with MADV_MERGEABLE).
		 */
		hlist_del(&slot->link);
		list_del(&slot->mm_list);
		spin_unlock(&ksm_mmlist_lock);

		free_mm_slot(slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		up_read(&mm->mmap_sem);
		mmdrop(mm);
	} else {
		spin_unlock(&ksm_mmlist_lock);
		up_read(&mm->mmap_sem);
	}

	/* Repeat until we've completed scanning the whole list */
	slot = ksm_scan.mm_slot;
	if (slot != &ksm_mm_head)
		goto next_mm;

	ksm_scan.seqnr++;
	return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages: number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *page;

	while (scan_npages--) {
		cond_resched();
		rmap_item = scan_get_next_rmap_item(&page);
		if (!rmap_item)
			return;
		if (!PageKsm(page) || !in_stable_tree(rmap_item))
			cmp_and_merge_page(page, rmap_item);
		put_page(page);
	}
}

static int ksmd_should_run(void)
{
	return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
	set_user_nice(current, 5);

	while (!kthread_should_stop()) {
		mutex_lock(&ksm_thread_mutex);
		if (ksmd_should_run())
			ksm_do_scan(ksm_thread_pages_to_scan);
		mutex_unlock(&ksm_thread_mutex);

		if (ksmd_should_run()) {
			schedule_timeout_interruptible(
				msecs_to_jiffies(ksm_thread_sleep_millisecs));
		} else {
			wait_event_interruptible(ksm_thread_wait,
				ksmd_should_run() || kthread_should_stop());
		}
	}
	return 0;
}

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int err;

	switch (advice) {
	case MADV_MERGEABLE:
		/*
		 * Be somewhat over-protective for now!
		 */
		if (*vm_flags & (VM_MERGEABLE | VM_SHARED  | VM_MAYSHARE   |
				 VM_PFNMAP    | VM_IO      | VM_DONTEXPAND |
				 VM_RESERVED  | VM_HUGETLB | VM_INSERTPAGE |
				 VM_NONLINEAR | VM_MIXEDMAP | VM_SAO))
			return 0;		/* just ignore the advice */

		if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
			err = __ksm_enter(mm);
			if (err)
				return err;
		}

		*vm_flags |= VM_MERGEABLE;
		break;

	case MADV_UNMERGEABLE:
		if (!(*vm_flags & VM_MERGEABLE))
			return 0;		/* just ignore the advice */

		if (vma->anon_vma) {
			err = unmerge_ksm_pages(vma, start, end);
			if (err)
				return err;
		}

		*vm_flags &= ~VM_MERGEABLE;
		break;
	}

	return 0;
}

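/*
 * For reference, an application opts a region in or out from userspace
 * with madvise(2).  The sketch below is illustrative only (error handling
 * omitted, the 16MB length arbitrary):
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 16 << 20;
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);	(reaches ksm_madvise() above)
 *	...
 *	madvise(buf, len, MADV_UNMERGEABLE);
 */
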
int __ksm_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int needs_wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* Check ksm_run too?  Would need tighter locking */
	needs_wakeup = list_empty(&ksm_mm_head.mm_list);

	spin_lock(&ksm_mmlist_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little; when fork is followed by immediate exec, we don't
	 * want ksmd to waste time setting up and tearing down an rmap_list.
	 */
	list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
	spin_unlock(&ksm_mmlist_lock);

	set_bit(MMF_VM_MERGEABLE, &mm->flags);
	atomic_inc(&mm->mm_count);

	if (needs_wakeup)
		wake_up_interruptible(&ksm_thread_wait);

	return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int easy_to_free = 0;

	/*
	 * This process is exiting: if it's straightforward (as is the
	 * case when ksmd was never running), free mm_slot immediately.
	 * But if it's at the cursor or has rmap_items linked to it, use
	 * mmap_sem to synchronize with any break_cows before pagetables
	 * are freed, and leave the mm_slot on the list for ksmd to free.
	 * Beware: ksm may already have noticed it exiting and freed the slot.
	 */

	spin_lock(&ksm_mmlist_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && ksm_scan.mm_slot != mm_slot) {
		if (!mm_slot->rmap_list) {
			hlist_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			easy_to_free = 1;
		} else {
			list_move(&mm_slot->mm_list,
				  &ksm_scan.mm_slot->mm_list);
		}
	}
	spin_unlock(&ksm_mmlist_lock);

	if (easy_to_free) {
		free_mm_slot(mm_slot);
		clear_bit(MMF_VM_MERGEABLE, &mm->flags);
		mmdrop(mm);
	} else if (mm_slot) {
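		/*
		 * Nothing more to free here, but an empty write-lock/
		 * unlock of mmap_sem acts as a barrier: it waits out any
		 * break_cow() still holding mmap_sem for read, before
		 * our caller goes on to free this mm's pagetables.
		 */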
1481 | down_write(&mm->mmap_sem); |
1482 | up_write(&mm->mmap_sem); | |
9ba69294 | 1483 | } |
31dbd01f IE |
1484 | } |
1485 | ||
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct page *new_page;

	unlock_page(page);	/* any racers will COW it, not modify it */

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);
		__SetPageUptodate(new_page);
		SetPageSwapBacked(new_page);
		__set_page_locked(new_page);

		if (page_evictable(new_page, vma))
			lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
		else
			add_page_to_unevictable_list(new_page);
	}

	page_cache_release(page);
	return new_page;
}

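/*
 * Caller note (added for clarity; based on the <linux/ksm.h> wrapper of
 * this era): the swapin path reaches this function via
 * ksm_might_need_to_copy(), which calls here only when the swapped-in
 * page's anon_vma no longer matches the faulting vma - i.e. when the
 * page may be a still-shared KSM page needing a private copy before it
 * is mapped into this mm.
 */
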
int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
			unsigned long *vm_flags)
{
	struct stable_node *stable_node;
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	unsigned int mapcount = page_mapcount(page);
	int referenced = 0;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return 0;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct vm_area_struct *vma;

		spin_lock(&anon_vma->lock);
		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
				continue;

			referenced += page_referenced_one(page, vma,
				rmap_item->address, &mapcount, vm_flags);
			if (!search_new_forks || !mapcount)
				break;
		}
		spin_unlock(&anon_vma->lock);
		if (!mapcount)
			goto out;
	}
	if (!search_new_forks++)
		goto again;
out:
	return referenced;
}

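/*
 * Note on the two-pass walk above (mirrored in try_to_unmap_ksm()
 * below): with search_new_forks == 0 the condition skips every vma
 * whose mm differs from rmap_item->mm, so only the originally merged
 * mapping is examined; on the second pass the test inverts and only
 * forked copies of that mapping are examined.
 */
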
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	struct stable_node *stable_node;
	struct hlist_node *hlist;
	struct rmap_item *rmap_item;
	int ret = SWAP_AGAIN;
	int search_new_forks = 0;

	VM_BUG_ON(!PageKsm(page));
	VM_BUG_ON(!PageLocked(page));

	stable_node = page_stable_node(page);
	if (!stable_node)
		return SWAP_FAIL;
again:
	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		struct anon_vma *anon_vma = rmap_item->anon_vma;
		struct vm_area_struct *vma;

		spin_lock(&anon_vma->lock);
		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
			if (rmap_item->address < vma->vm_start ||
			    rmap_item->address >= vma->vm_end)
				continue;
			/*
			 * Initially we examine only the vma which covers this
			 * rmap_item; but later, if there is still work to do,
			 * we examine covering vmas in other mms: in case they
			 * were forked from the original since ksmd passed.
			 */
			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
				continue;

			ret = try_to_unmap_one(page, vma,
					rmap_item->address, flags);
			if (ret != SWAP_AGAIN || !page_mapped(page)) {
				spin_unlock(&anon_vma->lock);
				goto out;
			}
		}
		spin_unlock(&anon_vma->lock);
	}
	if (!search_new_forks++)
		goto again;
out:
	return ret;
}

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

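/*
 * For reference (expansion shown only as illustration), KSM_ATTR(run)
 * becomes the usual kobj_attribute boilerplate:
 *
 *	static struct kobj_attribute run_attr =
 *		__ATTR(run, 0644, run_show, run_store);
 */
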
static ssize_t sleep_millisecs_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = strict_strtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	ksm_thread_sleep_millisecs = msecs;

	return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err || nr_pages > UINT_MAX)
		return -EINVAL;

	ksm_thread_pages_to_scan = nr_pages;

	return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%u\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	int err;
	unsigned long flags;

	err = strict_strtoul(buf, 10, &flags);
	if (err || flags > UINT_MAX)
		return -EINVAL;
	if (flags > KSM_RUN_UNMERGE)
		return -EINVAL;

	/*
	 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
	 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
	 * breaking COW to free the unswappable pages_shared (but leaves
	 * mm_slots on the list for when ksmd may be set running again).
	 */

	mutex_lock(&ksm_thread_mutex);
	if (ksm_run != flags) {
		ksm_run = flags;
		if (flags & KSM_RUN_UNMERGE) {
			current->flags |= PF_OOM_ORIGIN;
			err = unmerge_and_remove_all_rmap_items();
			current->flags &= ~PF_OOM_ORIGIN;
			if (err) {
				ksm_run = KSM_RUN_STOP;
				count = err;
			}
		}
	}
	mutex_unlock(&ksm_thread_mutex);

	if (flags & KSM_RUN_MERGE)
		wake_up_interruptible(&ksm_thread_wait);

	return count;
}
KSM_ATTR(run);

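/*
 * Illustrative usage (root shell):
 *
 *	echo 0 > /sys/kernel/mm/ksm/run		stop ksmd, keep merged pages
 *	echo 1 > /sys/kernel/mm/ksm/run		start/resume scanning
 *	echo 2 > /sys/kernel/mm/ksm/run		stop and unmerge everything
 *
 * "echo 2" breaks COW on every merged page, so it may need a lot of
 * memory at once; PF_OOM_ORIGIN above marks this task as the preferred
 * OOM victim if that burst of allocation triggers the OOM killer.
 */
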
static ssize_t max_kernel_pages_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_max_kernel_pages = nr_pages;

	return count;
}

static ssize_t max_kernel_pages_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
}
KSM_ATTR(max_kernel_pages);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	long ksm_pages_volatile;

	ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
				- ksm_pages_sharing - ksm_pages_unshared;
	/*
	 * It was not worth any locking to calculate that statistic,
	 * but it might therefore sometimes be negative: conceal that.
	 */
	if (ksm_pages_volatile < 0)
		ksm_pages_volatile = 0;
	return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

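/*
 * Reading the stats (illustrative): every KSM page counted in
 * pages_shared saves the pages_sharing mappings that point at it, so
 * a high pages_sharing/pages_shared ratio indicates effective merging,
 * while a high pages_unshared/pages_sharing ratio indicates effort
 * wasted on pages that never found an identical partner.
 */
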
static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&max_kernel_pages_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&full_scans_attr.attr,
	NULL,
};

static struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

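	/* Default: cap merged pages at a quarter of total RAM; tunable via sysfs */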
	ksm_max_kernel_pages = totalram_pages / 4;

	err = ksm_slab_init();
	if (err)
		goto out;

	err = mm_slots_hash_init();
	if (err)
		goto out_free1;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		printk(KERN_ERR "ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free2;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		printk(KERN_ERR "ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free2;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

	return 0;

out_free2:
	mm_slots_hash_free();
out_free1:
	ksm_slab_free();
out:
	return err;
}
module_init(ksm_init)