/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

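/*
 * NUMA(x) and DO_NUMA(x) let NUMA-specific expressions and statements
 * compile away to nothing when CONFIG_NUMA is not set, so the code that
 * uses them below needs no #ifdefs of its own.
 */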
#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on
 *    the colors of the nodes and not on their contents, assuring that even
 *    when the tree gets "corrupted" it won't get out of balance, so scanning
 *    time remains the same (also, searching and inserting nodes in an rbtree
 *    uses the same algorithm, so we have no overhead when we flush and
 *    rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page
 */
struct stable_node {
	struct rb_node node;
	struct hlist_head hlist;
	unsigned long kpfn;
};
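
/*
 * Note: a ksm page's page->mapping points back at its stable_node, tagged
 * with PAGE_MAPPING_ANON | PAGE_MAPPING_KSM in its low bits - see the
 * expected_mapping check in get_ksm_page() below.
 */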

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	struct anon_vma *anon_vma;	/* when stable */
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
#ifdef CONFIG_NUMA
	int nid;
#endif
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root root_unstable_tree[MAX_NUMNODES];
static struct rb_root root_stable_tree[MAX_NUMNODES];

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
#else
#define ksm_merge_across_nodes	1U
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
static unsigned int ksm_run = KSM_RUN_STOP;

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct hlist_node *node;
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

static inline int in_stable_tree(struct rmap_item *rmap_item)
{
	return rmap_item->address & STABLE_FLAG;
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *page_trans_compound_anon(struct page *page)
{
	if (PageTransCompound(page)) {
		struct page *head = compound_trans_head(page);
		/*
		 * head may actually be split and freed from under
		 * us but it's ok here.
		 */
		if (PageAnon(head))
			return head;
	}
	return NULL;
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots in
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : pfn_to_nid(kpfn);
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;
	struct hlist_node *hlist;
	int nid;

	hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	nid = get_kpfn_nid(stable_node->kpfn);
	rb_erase(&stable_node->node, &root_stable_tree[nid]);
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 *
 * include/linux/pagemap.h page_cache_get_speculative() is a good reference,
 * but this is different - made simpler by ksm_thread_mutex being held, but
 * interesting for assuming that no other use of the struct page could ever
 * put our expected_mapping into page->mapping (or a field of the union which
 * coincides with page->mapping).
 *
 * Note: it is possible that get_ksm_page() will return NULL one moment,
 * then page the next, if the page is in between page_freeze_refs() and
 * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool locked)
{
	struct page *page;
	void *expected_mapping;

	page = pfn_to_page(stable_node->kpfn);
	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	if (page->mapping != expected_mapping)
		goto stale;
	if (!get_page_unless_zero(page))
		goto stale;
	if (page->mapping != expected_mapping) {
		put_page(page);
		goto stale;
	}
	if (locked) {
		lock_page(page);
		if (page->mapping != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;
stale:
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, true);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 &root_unstable_tree[NUMA(rmap_item->nid)]);
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, true);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	if (WARN_ON_ONCE(page_mapped(page)))
		err = -EBUSY;
	else {
		/*
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node;
	int nid;
	int err = 0;

	for (nid = 0; nid < nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node(stable_node)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	return err;
}

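/*
 * Reached from sysfs when run is set to KSM_RUN_UNMERGE: break COW on all
 * merged pages, free every rmap_item, and empty the stable trees, so the
 * shared/sharing/unshared counts all return to zero.
 */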
static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

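/* jhash2() hashes an array of u32s, hence the PAGE_SIZE / 4 length below */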
static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

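/*
 * write_protect_page: clean and write-protect the pte mapping page into vma,
 * returning the old pte value in *orig_pte.  Returns 0 on success, -EFAULT
 * if the page is no longer mapped at the expected address, or if the page
 * count suggests some other user (e.g. O_DIRECT) could still write to the
 * page behind our back.
 */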
static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmun_start = addr;
	mmun_end = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out_mn;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it
		 * doesn't take any lock, therefore the check that we are
		 * going to make with the pagecount against the mapcount is
		 * racy and O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check:
		 * this assures us that no O_DIRECT can happen after the
		 * check or in the middle of the check.
		 */
		entry = ptep_clear_flush(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma: vma that holds the pte pointing to page
 * @page: the page we are replacing by kpage
 * @kpage: the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;
	BUG_ON(pmd_trans_huge(*pmd));

	mmun_start = addr;
	mmun_end = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

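/*
 * Returns 0 if page is not part of an anonymous transparent hugepage, or
 * once that hugepage has been split; returns nonzero if splitting failed
 * or raced with the head page changing under us, in which case the caller
 * just retries on a later scan pass.
 */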
static int page_trans_compound_anon_split(struct page *page)
{
	int ret = 0;
	struct page *transhuge_head = page_trans_compound_anon(page);
	if (transhuge_head) {
		/* Get the reference on the head to split it. */
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page ran
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page ran from under us. */
			ret = 1;
	}
	return ret;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
		goto out;
	BUG_ON(PageTransCompound(page));
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Must get reference to anon_vma while still holding mmap_sem */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the ksm page of identical content if found,
 * with its refcount raised, NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	struct rb_node *node;
	struct stable_node *stable_node;
	int nid;

	stable_node = page_stable_node(page);
	if (stable_node) {			/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	node = root_stable_tree[nid].rb_node;

	while (node) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(node, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(page, tree_page);

		if (ret < 0) {
			put_page(tree_page);
			node = node->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			node = node->rb_right;
		} else
			return tree_page;
	}

	return NULL;
}

/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	int nid;
	unsigned long kpfn;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct stable_node *stable_node;

	kpfn = page_to_pfn(kpage);
	nid = get_kpfn_nid(kpfn);
	new = &root_stable_tree[nid].rb_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page)
			return NULL;

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	INIT_HLIST_HEAD(&stable_node->hlist);
	stable_node->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node);
	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, &root_stable_tree[nid]);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns a pointer to the rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = &root_unstable_tree[nid];
	new = &root->rb_node;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (IS_ERR_OR_NULL(tree_page))
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		/*
		 * If tree_page has been migrated to another NUMA node, it
		 * will be flushed out and put into the right unstable tree
		 * next time: only merge with it if merge_across_nodes.
		 */
		if (!ksm_merge_across_nodes && page_to_nid(tree_page) != nid) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}

/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
			       struct stable_node *stable_node)
{
	/*
	 * Usually rmap_item->nid is already set correctly,
	 * but it may be wrong after switching merge_across_nodes.
	 */
	DO_NUMA(rmap_item->nid = get_kpfn_nid(stable_node->kpfn));
	rmap_item->head = stable_node;
	rmap_item->address |= STABLE_FLAG;
	hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

	if (rmap_item->hlist.next)
		ksm_pages_sharing++;
	else
		ksm_pages_shared++;
}

/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching identical page to.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
	struct rmap_item *tree_rmap_item;
	struct page *tree_page = NULL;
	struct stable_node *stable_node;
	struct page *kpage;
	unsigned int checksum;
	int err;

	remove_rmap_item_from_tree(rmap_item);

	/* We first start with searching the page inside the stable tree */
	kpage = stable_tree_search(page);
	if (kpage) {
		err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
		if (!err) {
			/*
			 * The page was successfully merged:
			 * add its rmap_item to the stable tree.
			 */
			lock_page(kpage);
			stable_tree_append(rmap_item, page_stable_node(kpage));
			unlock_page(kpage);
		}
		put_page(kpage);
		return;
	}

	/*
	 * If the hash value of the page has changed from the last time
	 * we calculated it, this page is changing frequently: therefore we
	 * don't want to insert it in the unstable tree, and we don't want
	 * to waste our time searching for something identical to it there.
	 */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;
	}

	tree_rmap_item =
		unstable_tree_search_insert(rmap_item, page, &tree_page);
	if (tree_rmap_item) {
		kpage = try_to_merge_two_pages(rmap_item, page,
						tree_rmap_item, tree_page);
		put_page(tree_page);
		/*
		 * As soon as we merge this page, we want to remove the
		 * rmap_item of the page we have merged with from the unstable
		 * tree, and insert it instead as new node in the stable tree.
		 */
		if (kpage) {
			remove_rmap_item_from_tree(tree_rmap_item);

			lock_page(kpage);
			stable_node = stable_tree_insert(kpage);
			if (stable_node) {
				stable_tree_append(tree_rmap_item,
							stable_node);
				stable_tree_append(rmap_item, stable_node);
			}
			unlock_page(kpage);

			/*
			 * If we fail to insert the page into the stable tree,
			 * we will have 2 virtual addresses that are pointing
			 * to a ksm page left outside the stable tree,
			 * in which case we need to break_cow on both.
			 */
			if (!stable_node) {
				break_cow(tree_rmap_item);
				break_cow(rmap_item);
			}
		}
	}
}
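
/*
 * get_next_rmap_item: advance along this mm_slot's rmap_list, which mirrors
 * the mm's mergeable areas in address order, to the entry for addr: freeing
 * any stale rmap_items passed over on the way, and allocating a fresh one
 * if addr has no rmap_item yet.
 */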
1369 | ||
1370 | static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, | |
6514d511 | 1371 | struct rmap_item **rmap_list, |
31dbd01f IE |
1372 | unsigned long addr) |
1373 | { | |
1374 | struct rmap_item *rmap_item; | |
1375 | ||
6514d511 HD |
1376 | while (*rmap_list) { |
1377 | rmap_item = *rmap_list; | |
93d17715 | 1378 | if ((rmap_item->address & PAGE_MASK) == addr) |
31dbd01f | 1379 | return rmap_item; |
31dbd01f IE |
1380 | if (rmap_item->address > addr) |
1381 | break; | |
6514d511 | 1382 | *rmap_list = rmap_item->rmap_list; |
31dbd01f | 1383 | remove_rmap_item_from_tree(rmap_item); |
31dbd01f IE |
1384 | free_rmap_item(rmap_item); |
1385 | } | |
1386 | ||
1387 | rmap_item = alloc_rmap_item(); | |
1388 | if (rmap_item) { | |
1389 | /* It has already been zeroed */ | |
1390 | rmap_item->mm = mm_slot->mm; | |
1391 | rmap_item->address = addr; | |
6514d511 HD |
1392 | rmap_item->rmap_list = *rmap_list; |
1393 | *rmap_list = rmap_item; | |
31dbd01f IE |
1394 | } |
1395 | return rmap_item; | |
1396 | } | |
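/*
 * Note on the walk above: each mm's rmap_list is kept sorted by address,
 * so items whose addresses the scan has already passed are unlinked,
 * removed from the trees and freed, while a missing item is allocated
 * and spliced in ahead of the first higher-addressed one.  The caller
 * then advances ksm_scan.rmap_list past the item it was handed.
 */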
1397 | ||
1398 | static struct rmap_item *scan_get_next_rmap_item(struct page **page) | |
1399 | { | |
1400 | struct mm_struct *mm; | |
1401 | struct mm_slot *slot; | |
1402 | struct vm_area_struct *vma; | |
1403 | struct rmap_item *rmap_item; | |
90bd6fd3 | 1404 | int nid; |
31dbd01f IE |
1405 | |
1406 | if (list_empty(&ksm_mm_head.mm_list)) | |
1407 | return NULL; | |
1408 | ||
1409 | slot = ksm_scan.mm_slot; | |
1410 | if (slot == &ksm_mm_head) { | |
2919bfd0 HD |
1411 | /* |
1412 | * A number of pages can hang around indefinitely on per-cpu | |
1413 | * pagevecs, raised page count preventing write_protect_page | |
1414 | * from merging them. Though it doesn't really matter much, | |
1415 | * it is puzzling to see some stuck in pages_volatile until | |
1416 | * other activity jostles them out, and they also prevented | |
1417 | * LTP's KSM test from succeeding deterministically; so drain | |
1418 | * them here (here rather than on entry to ksm_do_scan(), | |
1419 | * so we don't IPI too often when pages_to_scan is set low). | |
1420 | */ | |
1421 | lru_add_drain_all(); | |
1422 | ||
90bd6fd3 PH |
1423 | for (nid = 0; nid < nr_node_ids; nid++) |
1424 | root_unstable_tree[nid] = RB_ROOT; | |
31dbd01f IE |
1425 | |
1426 | spin_lock(&ksm_mmlist_lock); | |
1427 | slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); | |
1428 | ksm_scan.mm_slot = slot; | |
1429 | spin_unlock(&ksm_mmlist_lock); | |
2b472611 HD |
1430 | /* |
1431 | * Although we tested list_empty() above, a racing __ksm_exit | |
1432 | * of the last mm on the list may have removed it since then. | |
1433 | */ | |
1434 | if (slot == &ksm_mm_head) | |
1435 | return NULL; | |
31dbd01f IE |
1436 | next_mm: |
1437 | ksm_scan.address = 0; | |
6514d511 | 1438 | ksm_scan.rmap_list = &slot->rmap_list; |
31dbd01f IE |
1439 | } |
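/*
 * From this point (ksm_scan.mm_slot, ksm_scan.address, ksm_scan.rmap_list)
 * form a resumable cursor: the function returns one candidate page at a
 * time, with a reference held, and re-enters wherever it left off.
 */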
1440 | ||
1441 | mm = slot->mm; | |
1442 | down_read(&mm->mmap_sem); | |
9ba69294 HD |
1443 | if (ksm_test_exit(mm)) |
1444 | vma = NULL; | |
1445 | else | |
1446 | vma = find_vma(mm, ksm_scan.address); | |
1447 | ||
1448 | for (; vma; vma = vma->vm_next) { | |
31dbd01f IE |
1449 | if (!(vma->vm_flags & VM_MERGEABLE)) |
1450 | continue; | |
1451 | if (ksm_scan.address < vma->vm_start) | |
1452 | ksm_scan.address = vma->vm_start; | |
1453 | if (!vma->anon_vma) | |
1454 | ksm_scan.address = vma->vm_end; | |
1455 | ||
1456 | while (ksm_scan.address < vma->vm_end) { | |
9ba69294 HD |
1457 | if (ksm_test_exit(mm)) |
1458 | break; | |
31dbd01f | 1459 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
21ae5b01 AA |
1460 | if (IS_ERR_OR_NULL(*page)) { |
1461 | ksm_scan.address += PAGE_SIZE; | |
1462 | cond_resched(); | |
1463 | continue; | |
1464 | } | |
29ad768c AA |
1465 | if (PageAnon(*page) || |
1466 | page_trans_compound_anon(*page)) { | |
31dbd01f IE |
1467 | flush_anon_page(vma, *page, ksm_scan.address); |
1468 | flush_dcache_page(*page); | |
1469 | rmap_item = get_next_rmap_item(slot, | |
6514d511 | 1470 | ksm_scan.rmap_list, ksm_scan.address); |
31dbd01f | 1471 | if (rmap_item) { |
6514d511 HD |
1472 | ksm_scan.rmap_list = |
1473 | &rmap_item->rmap_list; | |
31dbd01f IE |
1474 | ksm_scan.address += PAGE_SIZE; |
1475 | } else | |
1476 | put_page(*page); | |
1477 | up_read(&mm->mmap_sem); | |
1478 | return rmap_item; | |
1479 | } | |
21ae5b01 | 1480 | put_page(*page); |
31dbd01f IE |
1481 | ksm_scan.address += PAGE_SIZE; |
1482 | cond_resched(); | |
1483 | } | |
1484 | } | |
1485 | ||
9ba69294 HD |
1486 | if (ksm_test_exit(mm)) { |
1487 | ksm_scan.address = 0; | |
6514d511 | 1488 | ksm_scan.rmap_list = &slot->rmap_list; |
9ba69294 | 1489 | } |
31dbd01f IE |
1490 | /* |
1491 | * Nuke all the rmap_items that are above this current rmap, |
1492 | * because there were no VM_MERGEABLE vmas with such addresses. |
1493 | */ | |
6514d511 | 1494 | remove_trailing_rmap_items(slot, ksm_scan.rmap_list); |
31dbd01f IE |
1495 | |
1496 | spin_lock(&ksm_mmlist_lock); | |
cd551f97 HD |
1497 | ksm_scan.mm_slot = list_entry(slot->mm_list.next, |
1498 | struct mm_slot, mm_list); | |
1499 | if (ksm_scan.address == 0) { | |
1500 | /* | |
1501 | * We've completed a full scan of all vmas, holding mmap_sem | |
1502 | * throughout, and found no VM_MERGEABLE: so do the same as | |
1503 | * __ksm_exit does to remove this mm from all our lists now. | |
9ba69294 HD |
1504 | * This applies either when cleaning up after __ksm_exit |
1505 | * (but beware: we can reach here even before __ksm_exit), | |
1506 | * or when all VM_MERGEABLE areas have been unmapped (and | |
1507 | * mmap_sem then protects against race with MADV_MERGEABLE). | |
cd551f97 | 1508 | */ |
4ca3a69b | 1509 | hash_del(&slot->link); |
cd551f97 | 1510 | list_del(&slot->mm_list); |
9ba69294 HD |
1511 | spin_unlock(&ksm_mmlist_lock); |
1512 | ||
cd551f97 HD |
1513 | free_mm_slot(slot); |
1514 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); | |
9ba69294 HD |
1515 | up_read(&mm->mmap_sem); |
1516 | mmdrop(mm); | |
1517 | } else { | |
1518 | spin_unlock(&ksm_mmlist_lock); | |
1519 | up_read(&mm->mmap_sem); | |
cd551f97 | 1520 | } |
31dbd01f IE |
1521 | |
1522 | /* Repeat until we've completed scanning the whole list */ | |
cd551f97 | 1523 | slot = ksm_scan.mm_slot; |
31dbd01f IE |
1524 | if (slot != &ksm_mm_head) |
1525 | goto next_mm; | |
1526 | ||
31dbd01f IE |
1527 | ksm_scan.seqnr++; |
1528 | return NULL; | |
1529 | } | |
1530 | ||
1531 | /** | |
1532 | * ksm_do_scan - the ksm scanner main worker function. | |
1533 | * @scan_npages: number of pages we want to scan before we return. |
1534 | */ | |
1535 | static void ksm_do_scan(unsigned int scan_npages) | |
1536 | { | |
1537 | struct rmap_item *rmap_item; | |
22eccdd7 | 1538 | struct page *uninitialized_var(page); |
31dbd01f | 1539 | |
878aee7d | 1540 | while (scan_npages-- && likely(!freezing(current))) { |
31dbd01f IE |
1541 | cond_resched(); |
1542 | rmap_item = scan_get_next_rmap_item(&page); | |
1543 | if (!rmap_item) | |
1544 | return; | |
1545 | if (!PageKsm(page) || !in_stable_tree(rmap_item)) | |
1546 | cmp_and_merge_page(page, rmap_item); | |
1547 | put_page(page); | |
1548 | } | |
1549 | } | |
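/*
 * Back-of-envelope throughput: each batch scans scan_npages pages and the
 * thread then sleeps, so ksmd advances at roughly
 *
 *	pages_to_scan * 1000 / sleep_millisecs	pages per second
 *
 * With the usual defaults of 100 pages per 20ms batch (both tunables are
 * defined earlier in this file) that is about 5000 pages, or ~20MB of
 * 4KiB pages, scanned per second, ignoring the cost of the scan itself.
 */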
1550 | ||
6e158384 HD |
1551 | static int ksmd_should_run(void) |
1552 | { | |
1553 | return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); | |
1554 | } | |
1555 | ||
31dbd01f IE |
1556 | static int ksm_scan_thread(void *nothing) |
1557 | { | |
878aee7d | 1558 | set_freezable(); |
339aa624 | 1559 | set_user_nice(current, 5); |
31dbd01f IE |
1560 | |
1561 | while (!kthread_should_stop()) { | |
6e158384 HD |
1562 | mutex_lock(&ksm_thread_mutex); |
1563 | if (ksmd_should_run()) | |
31dbd01f | 1564 | ksm_do_scan(ksm_thread_pages_to_scan); |
6e158384 HD |
1565 | mutex_unlock(&ksm_thread_mutex); |
1566 | ||
878aee7d AA |
1567 | try_to_freeze(); |
1568 | ||
6e158384 | 1569 | if (ksmd_should_run()) { |
31dbd01f IE |
1570 | schedule_timeout_interruptible( |
1571 | msecs_to_jiffies(ksm_thread_sleep_millisecs)); | |
1572 | } else { | |
878aee7d | 1573 | wait_event_freezable(ksm_thread_wait, |
6e158384 | 1574 | ksmd_should_run() || kthread_should_stop()); |
31dbd01f IE |
1575 | } |
1576 | } | |
1577 | return 0; | |
1578 | } | |
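/*
 * ksm_thread_mutex, taken around each batch above, is the same lock that
 * run_store() and the memory hot-remove notifier below take to hold ksmd
 * quiescent while they change state under it.
 */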
1579 | ||
f8af4da3 HD |
1580 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
1581 | unsigned long end, int advice, unsigned long *vm_flags) | |
1582 | { | |
1583 | struct mm_struct *mm = vma->vm_mm; | |
d952b791 | 1584 | int err; |
f8af4da3 HD |
1585 | |
1586 | switch (advice) { | |
1587 | case MADV_MERGEABLE: | |
1588 | /* | |
1589 | * Be somewhat over-protective for now! | |
1590 | */ | |
1591 | if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | | |
1592 | VM_PFNMAP | VM_IO | VM_DONTEXPAND | | |
314e51b9 | 1593 | VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP)) |
f8af4da3 HD |
1594 | return 0; /* just ignore the advice */ |
1595 | ||
cc2383ec KK |
1596 | #ifdef VM_SAO |
1597 | if (*vm_flags & VM_SAO) | |
1598 | return 0; | |
1599 | #endif | |
1600 | ||
d952b791 HD |
1601 | if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { |
1602 | err = __ksm_enter(mm); | |
1603 | if (err) | |
1604 | return err; | |
1605 | } | |
f8af4da3 HD |
1606 | |
1607 | *vm_flags |= VM_MERGEABLE; | |
1608 | break; | |
1609 | ||
1610 | case MADV_UNMERGEABLE: | |
1611 | if (!(*vm_flags & VM_MERGEABLE)) | |
1612 | return 0; /* just ignore the advice */ | |
1613 | ||
d952b791 HD |
1614 | if (vma->anon_vma) { |
1615 | err = unmerge_ksm_pages(vma, start, end); | |
1616 | if (err) | |
1617 | return err; | |
1618 | } | |
f8af4da3 HD |
1619 | |
1620 | *vm_flags &= ~VM_MERGEABLE; | |
1621 | break; | |
1622 | } | |
1623 | ||
1624 | return 0; | |
1625 | } | |
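/*
 * User-space view of ksm_madvise(), as a minimal sketch (not part of this
 * file): an application opts an anonymous region in with MADV_MERGEABLE,
 * and back out with MADV_UNMERGEABLE; both are plain madvise(2) calls.
 *
 *	#include <stddef.h>
 *	#include <sys/mman.h>
 *
 *	static char *alloc_mergeable(size_t len)
 *	{
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		if (madvise(p, len, MADV_MERGEABLE)) {
 *			// e.g. -EINVAL when the kernel lacks CONFIG_KSM
 *			munmap(p, len);
 *			return NULL;
 *		}
 *		return p;
 *	}
 */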
1626 | ||
1627 | int __ksm_enter(struct mm_struct *mm) | |
1628 | { | |
6e158384 HD |
1629 | struct mm_slot *mm_slot; |
1630 | int needs_wakeup; | |
1631 | ||
1632 | mm_slot = alloc_mm_slot(); | |
31dbd01f IE |
1633 | if (!mm_slot) |
1634 | return -ENOMEM; | |
1635 | ||
6e158384 HD |
1636 | /* Check ksm_run too? Would need tighter locking */ |
1637 | needs_wakeup = list_empty(&ksm_mm_head.mm_list); | |
1638 | ||
31dbd01f IE |
1639 | spin_lock(&ksm_mmlist_lock); |
1640 | insert_to_mm_slots_hash(mm, mm_slot); | |
1641 | /* | |
cbf86cfe HD |
1642 | * When KSM_RUN_MERGE (or KSM_RUN_STOP), |
1643 | * insert just behind the scanning cursor, to let the area settle | |
31dbd01f IE |
1644 | * down a little; when fork is followed by immediate exec, we don't |
1645 | * want ksmd to waste time setting up and tearing down an rmap_list. | |
cbf86cfe HD |
1646 | * |
1647 | * But when KSM_RUN_UNMERGE, it's important to insert ahead of its | |
1648 | * scanning cursor, otherwise KSM pages in newly forked mms will be | |
1649 | * missed: then we might as well insert at the end of the list. | |
31dbd01f | 1650 | */ |
cbf86cfe HD |
1651 | if (ksm_run & KSM_RUN_UNMERGE) |
1652 | list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); | |
1653 | else | |
1654 | list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); | |
31dbd01f IE |
1655 | spin_unlock(&ksm_mmlist_lock); |
1656 | ||
f8af4da3 | 1657 | set_bit(MMF_VM_MERGEABLE, &mm->flags); |
9ba69294 | 1658 | atomic_inc(&mm->mm_count); |
6e158384 HD |
1659 | |
1660 | if (needs_wakeup) | |
1661 | wake_up_interruptible(&ksm_thread_wait); | |
1662 | ||
f8af4da3 HD |
1663 | return 0; |
1664 | } | |
1665 | ||
1c2fb7a4 | 1666 | void __ksm_exit(struct mm_struct *mm) |
f8af4da3 | 1667 | { |
cd551f97 | 1668 | struct mm_slot *mm_slot; |
9ba69294 | 1669 | int easy_to_free = 0; |
cd551f97 | 1670 | |
31dbd01f | 1671 | /* |
9ba69294 HD |
1672 | * This process is exiting: if it's straightforward (as is the |
1673 | * case when ksmd was never running), free mm_slot immediately. | |
1674 | * But if it's at the cursor or has rmap_items linked to it, use | |
1675 | * mmap_sem to synchronize with any break_cows before pagetables | |
1676 | * are freed, and leave the mm_slot on the list for ksmd to free. | |
1677 | * Beware: ksm may already have noticed it exiting and freed the slot. | |
31dbd01f | 1678 | */ |
9ba69294 | 1679 | |
cd551f97 HD |
1680 | spin_lock(&ksm_mmlist_lock); |
1681 | mm_slot = get_mm_slot(mm); | |
9ba69294 | 1682 | if (mm_slot && ksm_scan.mm_slot != mm_slot) { |
6514d511 | 1683 | if (!mm_slot->rmap_list) { |
4ca3a69b | 1684 | hash_del(&mm_slot->link); |
9ba69294 HD |
1685 | list_del(&mm_slot->mm_list); |
1686 | easy_to_free = 1; | |
1687 | } else { | |
1688 | list_move(&mm_slot->mm_list, | |
1689 | &ksm_scan.mm_slot->mm_list); | |
1690 | } | |
cd551f97 | 1691 | } |
cd551f97 HD |
1692 | spin_unlock(&ksm_mmlist_lock); |
1693 | ||
9ba69294 HD |
1694 | if (easy_to_free) { |
1695 | free_mm_slot(mm_slot); | |
1696 | clear_bit(MMF_VM_MERGEABLE, &mm->flags); | |
1697 | mmdrop(mm); | |
1698 | } else if (mm_slot) { | |
9ba69294 HD |
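		/*
		 * Not easy to free: the slot is at the scan cursor or still
		 * has rmap_items.  Take and drop mmap_sem for write purely
		 * as a barrier, so that any break_cow() running under
		 * mmap_sem finishes before the page tables are torn down;
		 * ksmd will notice the exit and free the slot later.
		 */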
1699 | down_write(&mm->mmap_sem); |
1700 | up_write(&mm->mmap_sem); | |
9ba69294 | 1701 | } |
31dbd01f IE |
1702 | } |
1703 | ||
cbf86cfe | 1704 | struct page *ksm_might_need_to_copy(struct page *page, |
5ad64688 HD |
1705 | struct vm_area_struct *vma, unsigned long address) |
1706 | { | |
cbf86cfe | 1707 | struct anon_vma *anon_vma = page_anon_vma(page); |
5ad64688 HD |
1708 | struct page *new_page; |
1709 | ||
cbf86cfe HD |
1710 | if (PageKsm(page)) { |
1711 | if (page_stable_node(page) && | |
1712 | !(ksm_run & KSM_RUN_UNMERGE)) | |
1713 | return page; /* no need to copy it */ | |
1714 | } else if (!anon_vma) { | |
1715 | return page; /* no need to copy it */ | |
1716 | } else if (anon_vma->root == vma->anon_vma->root && | |
1717 | page->index == linear_page_index(vma, address)) { | |
1718 | return page; /* still no need to copy it */ | |
1719 | } | |
1720 | if (!PageUptodate(page)) | |
1721 | return page; /* let do_swap_page report the error */ | |
1722 | ||
5ad64688 HD |
1723 | new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); |
1724 | if (new_page) { | |
1725 | copy_user_highpage(new_page, page, address, vma); | |
1726 | ||
1727 | SetPageDirty(new_page); | |
1728 | __SetPageUptodate(new_page); | |
5ad64688 | 1729 | __set_page_locked(new_page); |
5ad64688 HD |
1730 | } |
1731 | ||
5ad64688 HD |
1732 | return new_page; |
1733 | } | |
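/*
 * Rationale for the copying above: a page read back from swap may be a
 * stale KSM page, or an anonymous page whose anon_vma and index no longer
 * describe the faulting vma, so reusing it in place would leave the
 * reverse map inconsistent; do_swap_page() is handed a private copy
 * instead.
 */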
1734 | ||
1735 | int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, | |
1736 | unsigned long *vm_flags) | |
1737 | { | |
1738 | struct stable_node *stable_node; | |
1739 | struct rmap_item *rmap_item; | |
1740 | struct hlist_node *hlist; | |
1741 | unsigned int mapcount = page_mapcount(page); | |
1742 | int referenced = 0; | |
db114b83 | 1743 | int search_new_forks = 0; |
5ad64688 HD |
1744 | |
1745 | VM_BUG_ON(!PageKsm(page)); | |
1746 | VM_BUG_ON(!PageLocked(page)); | |
1747 | ||
1748 | stable_node = page_stable_node(page); | |
1749 | if (!stable_node) | |
1750 | return 0; | |
db114b83 | 1751 | again: |
5ad64688 | 1752 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { |
db114b83 | 1753 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
5beb4930 | 1754 | struct anon_vma_chain *vmac; |
db114b83 | 1755 | struct vm_area_struct *vma; |
5ad64688 | 1756 | |
b6b19f25 | 1757 | anon_vma_lock_read(anon_vma); |
bf181b9f ML |
1758 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
1759 | 0, ULONG_MAX) { | |
5beb4930 | 1760 | vma = vmac->vma; |
db114b83 HD |
1761 | if (rmap_item->address < vma->vm_start || |
1762 | rmap_item->address >= vma->vm_end) | |
1763 | continue; | |
1764 | /* | |
1765 | * Initially we examine only the vma which covers this | |
1766 | * rmap_item; but later, if there is still work to do, | |
1767 | * we examine covering vmas in other mms: in case they | |
1768 | * were forked from the original since ksmd passed. | |
1769 | */ | |
1770 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | |
1771 | continue; | |
1772 | ||
1773 | if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) | |
1774 | continue; | |
5ad64688 | 1775 | |
db114b83 | 1776 | referenced += page_referenced_one(page, vma, |
5ad64688 | 1777 | rmap_item->address, &mapcount, vm_flags); |
db114b83 HD |
1778 | if (!search_new_forks || !mapcount) |
1779 | break; | |
1780 | } | |
b6b19f25 | 1781 | anon_vma_unlock_read(anon_vma); |
5ad64688 HD |
1782 | if (!mapcount) |
1783 | goto out; | |
1784 | } | |
db114b83 HD |
1785 | if (!search_new_forks++) |
1786 | goto again; | |
5ad64688 | 1787 | out: |
5ad64688 HD |
1788 | return referenced; |
1789 | } | |
1790 | ||
1791 | int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) | |
1792 | { | |
1793 | struct stable_node *stable_node; | |
1794 | struct hlist_node *hlist; | |
1795 | struct rmap_item *rmap_item; | |
1796 | int ret = SWAP_AGAIN; | |
db114b83 | 1797 | int search_new_forks = 0; |
5ad64688 HD |
1798 | |
1799 | VM_BUG_ON(!PageKsm(page)); | |
1800 | VM_BUG_ON(!PageLocked(page)); | |
1801 | ||
1802 | stable_node = page_stable_node(page); | |
1803 | if (!stable_node) | |
1804 | return SWAP_FAIL; | |
db114b83 | 1805 | again: |
5ad64688 | 1806 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { |
db114b83 | 1807 | struct anon_vma *anon_vma = rmap_item->anon_vma; |
5beb4930 | 1808 | struct anon_vma_chain *vmac; |
db114b83 | 1809 | struct vm_area_struct *vma; |
5ad64688 | 1810 | |
b6b19f25 | 1811 | anon_vma_lock_read(anon_vma); |
bf181b9f ML |
1812 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
1813 | 0, ULONG_MAX) { | |
5beb4930 | 1814 | vma = vmac->vma; |
db114b83 HD |
1815 | if (rmap_item->address < vma->vm_start || |
1816 | rmap_item->address >= vma->vm_end) | |
1817 | continue; | |
1818 | /* | |
1819 | * Initially we examine only the vma which covers this | |
1820 | * rmap_item; but later, if there is still work to do, | |
1821 | * we examine covering vmas in other mms: in case they | |
1822 | * were forked from the original since ksmd passed. | |
1823 | */ | |
1824 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | |
1825 | continue; | |
1826 | ||
1827 | ret = try_to_unmap_one(page, vma, | |
1828 | rmap_item->address, flags); | |
1829 | if (ret != SWAP_AGAIN || !page_mapped(page)) { | |
b6b19f25 | 1830 | anon_vma_unlock_read(anon_vma); |
db114b83 HD |
1831 | goto out; |
1832 | } | |
1833 | } | |
b6b19f25 | 1834 | anon_vma_unlock_read(anon_vma); |
5ad64688 | 1835 | } |
db114b83 HD |
1836 | if (!search_new_forks++) |
1837 | goto again; | |
5ad64688 | 1838 | out: |
5ad64688 HD |
1839 | return ret; |
1840 | } | |
1841 | ||
e9995ef9 HD |
1842 | #ifdef CONFIG_MIGRATION |
1843 | int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, | |
1844 | struct vm_area_struct *, unsigned long, void *), void *arg) | |
1845 | { | |
1846 | struct stable_node *stable_node; | |
1847 | struct hlist_node *hlist; | |
1848 | struct rmap_item *rmap_item; | |
1849 | int ret = SWAP_AGAIN; | |
1850 | int search_new_forks = 0; | |
1851 | ||
1852 | VM_BUG_ON(!PageKsm(page)); | |
1853 | VM_BUG_ON(!PageLocked(page)); | |
1854 | ||
1855 | stable_node = page_stable_node(page); | |
1856 | if (!stable_node) | |
1857 | return ret; | |
1858 | again: | |
1859 | hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { | |
1860 | struct anon_vma *anon_vma = rmap_item->anon_vma; | |
5beb4930 | 1861 | struct anon_vma_chain *vmac; |
e9995ef9 HD |
1862 | struct vm_area_struct *vma; |
1863 | ||
b6b19f25 | 1864 | anon_vma_lock_read(anon_vma); |
bf181b9f ML |
1865 | anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, |
1866 | 0, ULONG_MAX) { | |
5beb4930 | 1867 | vma = vmac->vma; |
e9995ef9 HD |
1868 | if (rmap_item->address < vma->vm_start || |
1869 | rmap_item->address >= vma->vm_end) | |
1870 | continue; | |
1871 | /* | |
1872 | * Initially we examine only the vma which covers this | |
1873 | * rmap_item; but later, if there is still work to do, | |
1874 | * we examine covering vmas in other mms: in case they | |
1875 | * were forked from the original since ksmd passed. | |
1876 | */ | |
1877 | if ((rmap_item->mm == vma->vm_mm) == search_new_forks) | |
1878 | continue; | |
1879 | ||
1880 | ret = rmap_one(page, vma, rmap_item->address, arg); | |
1881 | if (ret != SWAP_AGAIN) { | |
b6b19f25 | 1882 | anon_vma_unlock_read(anon_vma); |
e9995ef9 HD |
1883 | goto out; |
1884 | } | |
1885 | } | |
b6b19f25 | 1886 | anon_vma_unlock_read(anon_vma); |
e9995ef9 HD |
1887 | } |
1888 | if (!search_new_forks++) | |
1889 | goto again; | |
1890 | out: | |
1891 | return ret; | |
1892 | } | |
1893 | ||
1894 | void ksm_migrate_page(struct page *newpage, struct page *oldpage) | |
1895 | { | |
1896 | struct stable_node *stable_node; | |
1897 | ||
1898 | VM_BUG_ON(!PageLocked(oldpage)); | |
1899 | VM_BUG_ON(!PageLocked(newpage)); | |
1900 | VM_BUG_ON(newpage->mapping != oldpage->mapping); | |
1901 | ||
1902 | stable_node = page_stable_node(newpage); | |
1903 | if (stable_node) { | |
62b61f61 HD |
1904 | VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); |
1905 | stable_node->kpfn = page_to_pfn(newpage); | |
e9995ef9 HD |
1906 | } |
1907 | } | |
1908 | #endif /* CONFIG_MIGRATION */ | |
1909 | ||
62b61f61 | 1910 | #ifdef CONFIG_MEMORY_HOTREMOVE |
ee0ea59c HD |
1911 | static void ksm_check_stable_tree(unsigned long start_pfn, |
1912 | unsigned long end_pfn) | |
62b61f61 | 1913 | { |
ee0ea59c | 1914 | struct stable_node *stable_node; |
62b61f61 | 1915 | struct rb_node *node; |
90bd6fd3 | 1916 | int nid; |
62b61f61 | 1917 | |
ee0ea59c HD |
1918 | for (nid = 0; nid < nr_node_ids; nid++) { |
1919 | node = rb_first(&root_stable_tree[nid]); | |
1920 | while (node) { | |
90bd6fd3 PH |
1921 | stable_node = rb_entry(node, struct stable_node, node); |
1922 | if (stable_node->kpfn >= start_pfn && | |
ee0ea59c HD |
1923 | stable_node->kpfn < end_pfn) { |
1924 | /* | |
1925 | * Don't get_ksm_page, page has already gone: | |
1926 | * which is why we keep kpfn instead of page* | |
1927 | */ | |
1928 | remove_node_from_stable_tree(stable_node); | |
1929 | node = rb_first(&root_stable_tree[nid]); | |
1930 | } else | |
1931 | node = rb_next(node); | |
1932 | cond_resched(); | |
90bd6fd3 | 1933 | } |
ee0ea59c | 1934 | } |
62b61f61 HD |
1935 | } |
1936 | ||
1937 | static int ksm_memory_callback(struct notifier_block *self, | |
1938 | unsigned long action, void *arg) | |
1939 | { | |
1940 | struct memory_notify *mn = arg; | |
62b61f61 HD |
1941 | |
1942 | switch (action) { | |
1943 | case MEM_GOING_OFFLINE: | |
1944 | /* | |
1945 | * Keep it very simple for now: just lock out ksmd and | |
1946 | * MADV_UNMERGEABLE while any memory is going offline. | |
a0b0f58c KM |
1947 | * mutex_lock_nested() is necessary because lockdep was alarmed |
1948 | * that here we take ksm_thread_mutex inside notifier chain | |
1949 | * mutex, and later take notifier chain mutex inside | |
1950 | * ksm_thread_mutex to unlock it. But that's safe because both | |
1951 | * are inside mem_hotplug_mutex. | |
62b61f61 | 1952 | */ |
a0b0f58c | 1953 | mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); |
62b61f61 HD |
1954 | break; |
1955 | ||
1956 | case MEM_OFFLINE: | |
1957 | /* | |
1958 | * Most of the work is done by page migration; but there might | |
1959 | * be a few stable_nodes left over, still pointing to struct | |
ee0ea59c HD |
1960 | * pages which have been offlined: prune those from the tree, |
1961 | * otherwise get_ksm_page() might later try to access a | |
1962 | * non-existent struct page. | |
62b61f61 | 1963 | */ |
ee0ea59c HD |
1964 | ksm_check_stable_tree(mn->start_pfn, |
1965 | mn->start_pfn + mn->nr_pages); | |
62b61f61 HD |
1966 | /* fallthrough */ |
1967 | ||
1968 | case MEM_CANCEL_OFFLINE: | |
1969 | mutex_unlock(&ksm_thread_mutex); | |
1970 | break; | |
1971 | } | |
1972 | return NOTIFY_OK; | |
1973 | } | |
1974 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | |
1975 | ||
2ffd8679 HD |
1976 | #ifdef CONFIG_SYSFS |
1977 | /* | |
1978 | * This all compiles without CONFIG_SYSFS, but is a waste of space. | |
1979 | */ | |
1980 | ||
31dbd01f IE |
1981 | #define KSM_ATTR_RO(_name) \ |
1982 | static struct kobj_attribute _name##_attr = __ATTR_RO(_name) | |
1983 | #define KSM_ATTR(_name) \ | |
1984 | static struct kobj_attribute _name##_attr = \ | |
1985 | __ATTR(_name, 0644, _name##_show, _name##_store) | |
1986 | ||
1987 | static ssize_t sleep_millisecs_show(struct kobject *kobj, | |
1988 | struct kobj_attribute *attr, char *buf) | |
1989 | { | |
1990 | return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); | |
1991 | } | |
1992 | ||
1993 | static ssize_t sleep_millisecs_store(struct kobject *kobj, | |
1994 | struct kobj_attribute *attr, | |
1995 | const char *buf, size_t count) | |
1996 | { | |
1997 | unsigned long msecs; | |
1998 | int err; | |
1999 | ||
2000 | err = strict_strtoul(buf, 10, &msecs); | |
2001 | if (err || msecs > UINT_MAX) | |
2002 | return -EINVAL; | |
2003 | ||
2004 | ksm_thread_sleep_millisecs = msecs; | |
2005 | ||
2006 | return count; | |
2007 | } | |
2008 | KSM_ATTR(sleep_millisecs); | |
2009 | ||
2010 | static ssize_t pages_to_scan_show(struct kobject *kobj, | |
2011 | struct kobj_attribute *attr, char *buf) | |
2012 | { | |
2013 | return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); | |
2014 | } | |
2015 | ||
2016 | static ssize_t pages_to_scan_store(struct kobject *kobj, | |
2017 | struct kobj_attribute *attr, | |
2018 | const char *buf, size_t count) | |
2019 | { | |
2020 | int err; | |
2021 | unsigned long nr_pages; | |
2022 | ||
2023 | err = strict_strtoul(buf, 10, &nr_pages); | |
2024 | if (err || nr_pages > UINT_MAX) | |
2025 | return -EINVAL; | |
2026 | ||
2027 | ksm_thread_pages_to_scan = nr_pages; | |
2028 | ||
2029 | return count; | |
2030 | } | |
2031 | KSM_ATTR(pages_to_scan); | |
2032 | ||
2033 | static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, | |
2034 | char *buf) | |
2035 | { | |
2036 | return sprintf(buf, "%u\n", ksm_run); | |
2037 | } | |
2038 | ||
2039 | static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, | |
2040 | const char *buf, size_t count) | |
2041 | { | |
2042 | int err; | |
2043 | unsigned long flags; | |
2044 | ||
2045 | err = strict_strtoul(buf, 10, &flags); | |
2046 | if (err || flags > UINT_MAX) | |
2047 | return -EINVAL; | |
2048 | if (flags > KSM_RUN_UNMERGE) | |
2049 | return -EINVAL; | |
2050 | ||
2051 | /* | |
2052 | * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. | |
2053 | * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, | |
d0f209f6 HD |
2054 | * breaking COW to free the pages_shared (but leaves mm_slots |
2055 | * on the list for when ksmd may be set running again). | |
31dbd01f IE |
2056 | */ |
2057 | ||
2058 | mutex_lock(&ksm_thread_mutex); | |
2059 | if (ksm_run != flags) { | |
2060 | ksm_run = flags; | |
d952b791 | 2061 | if (flags & KSM_RUN_UNMERGE) { |
e1e12d2f | 2062 | set_current_oom_origin(); |
d952b791 | 2063 | err = unmerge_and_remove_all_rmap_items(); |
e1e12d2f | 2064 | clear_current_oom_origin(); |
d952b791 HD |
2065 | if (err) { |
2066 | ksm_run = KSM_RUN_STOP; | |
2067 | count = err; | |
2068 | } | |
2069 | } | |
31dbd01f IE |
2070 | } |
2071 | mutex_unlock(&ksm_thread_mutex); | |
2072 | ||
2073 | if (flags & KSM_RUN_MERGE) | |
2074 | wake_up_interruptible(&ksm_thread_wait); | |
2075 | ||
2076 | return count; | |
2077 | } | |
2078 | KSM_ATTR(run); | |
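/*
 * A minimal user-space sketch of driving this knob (the sysfs paths come
 * from the "ksm" attribute group registered below, under
 * /sys/kernel/mm/ksm/; the helper itself is invented here):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int ksm_write(const char *file, const char *val)
 *	{
 *		char path[128];
 *		int fd, ok;
 *
 *		snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", file);
 *		fd = open(path, O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		ok = write(fd, val, strlen(val)) == (ssize_t)strlen(val);
 *		close(fd);
 *		return ok ? 0 : -1;
 *	}
 *
 *	// ksm_write("run", "1");	start merging
 *	// ksm_write("run", "0");	stop, keeping merged pages
 *	// ksm_write("run", "2");	stop and unmerge everything
 */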
2079 | ||
90bd6fd3 PH |
2080 | #ifdef CONFIG_NUMA |
2081 | static ssize_t merge_across_nodes_show(struct kobject *kobj, | |
2082 | struct kobj_attribute *attr, char *buf) | |
2083 | { | |
2084 | return sprintf(buf, "%u\n", ksm_merge_across_nodes); | |
2085 | } | |
2086 | ||
2087 | static ssize_t merge_across_nodes_store(struct kobject *kobj, | |
2088 | struct kobj_attribute *attr, | |
2089 | const char *buf, size_t count) | |
2090 | { | |
2091 | int err; | |
2092 | unsigned long knob; | |
2093 | ||
2094 | err = kstrtoul(buf, 10, &knob); | |
2095 | if (err) | |
2096 | return err; | |
2097 | if (knob > 1) | |
2098 | return -EINVAL; | |
2099 | ||
2100 | mutex_lock(&ksm_thread_mutex); | |
2101 | if (ksm_merge_across_nodes != knob) { | |
cbf86cfe | 2102 | if (ksm_pages_shared || remove_all_stable_nodes()) |
90bd6fd3 PH |
2103 | err = -EBUSY; |
2104 | else | |
2105 | ksm_merge_across_nodes = knob; | |
2106 | } | |
2107 | mutex_unlock(&ksm_thread_mutex); | |
2108 | ||
2109 | return err ? err : count; | |
2110 | } | |
2111 | KSM_ATTR(merge_across_nodes); | |
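/*
 * Note: the store above refuses with -EBUSY while any pages are shared,
 * so the knob is normally changed with merging quiesced: write 2 to "run"
 * to unmerge everything, update merge_across_nodes, then write 1 to "run"
 * again.
 */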
2112 | #endif | |
2113 | ||
b4028260 HD |
2114 | static ssize_t pages_shared_show(struct kobject *kobj, |
2115 | struct kobj_attribute *attr, char *buf) | |
2116 | { | |
2117 | return sprintf(buf, "%lu\n", ksm_pages_shared); | |
2118 | } | |
2119 | KSM_ATTR_RO(pages_shared); | |
2120 | ||
2121 | static ssize_t pages_sharing_show(struct kobject *kobj, | |
2122 | struct kobj_attribute *attr, char *buf) | |
2123 | { | |
e178dfde | 2124 | return sprintf(buf, "%lu\n", ksm_pages_sharing); |
b4028260 HD |
2125 | } |
2126 | KSM_ATTR_RO(pages_sharing); | |
2127 | ||
473b0ce4 HD |
2128 | static ssize_t pages_unshared_show(struct kobject *kobj, |
2129 | struct kobj_attribute *attr, char *buf) | |
2130 | { | |
2131 | return sprintf(buf, "%lu\n", ksm_pages_unshared); | |
2132 | } | |
2133 | KSM_ATTR_RO(pages_unshared); | |
2134 | ||
2135 | static ssize_t pages_volatile_show(struct kobject *kobj, | |
2136 | struct kobj_attribute *attr, char *buf) | |
2137 | { | |
2138 | long ksm_pages_volatile; | |
2139 | ||
2140 | ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared | |
2141 | - ksm_pages_sharing - ksm_pages_unshared; | |
2142 | /* | |
2143 | * It was not worth any locking to calculate that statistic, | |
2144 | * but it might therefore sometimes be negative: conceal that. | |
2145 | */ | |
2146 | if (ksm_pages_volatile < 0) | |
2147 | ksm_pages_volatile = 0; | |
2148 | return sprintf(buf, "%ld\n", ksm_pages_volatile); | |
2149 | } | |
2150 | KSM_ATTR_RO(pages_volatile); | |
2151 | ||
2152 | static ssize_t full_scans_show(struct kobject *kobj, | |
2153 | struct kobj_attribute *attr, char *buf) | |
2154 | { | |
2155 | return sprintf(buf, "%lu\n", ksm_scan.seqnr); | |
2156 | } | |
2157 | KSM_ATTR_RO(full_scans); | |
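/*
 * Reading the counters above: by construction in pages_volatile_show(),
 *
 *	ksm_rmap_items == pages_shared + pages_sharing
 *			+ pages_unshared + pages_volatile
 *
 * (up to the unlocked races noted there), and the ratio
 * pages_sharing / pages_shared is the average number of extra mappings
 * per merged page, a rough measure of how much merging is saving.
 */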
2158 | ||
31dbd01f IE |
2159 | static struct attribute *ksm_attrs[] = { |
2160 | &sleep_millisecs_attr.attr, | |
2161 | &pages_to_scan_attr.attr, | |
2162 | &run_attr.attr, | |
b4028260 HD |
2163 | &pages_shared_attr.attr, |
2164 | &pages_sharing_attr.attr, | |
473b0ce4 HD |
2165 | &pages_unshared_attr.attr, |
2166 | &pages_volatile_attr.attr, | |
2167 | &full_scans_attr.attr, | |
90bd6fd3 PH |
2168 | #ifdef CONFIG_NUMA |
2169 | &merge_across_nodes_attr.attr, | |
2170 | #endif | |
31dbd01f IE |
2171 | NULL, |
2172 | }; | |
2173 | ||
2174 | static struct attribute_group ksm_attr_group = { | |
2175 | .attrs = ksm_attrs, | |
2176 | .name = "ksm", | |
2177 | }; | |
2ffd8679 | 2178 | #endif /* CONFIG_SYSFS */ |
31dbd01f IE |
2179 | |
2180 | static int __init ksm_init(void) | |
2181 | { | |
2182 | struct task_struct *ksm_thread; | |
2183 | int err; | |
90bd6fd3 | 2184 | int nid; |
31dbd01f IE |
2185 | |
2186 | err = ksm_slab_init(); | |
2187 | if (err) | |
2188 | goto out; | |
2189 | ||
90bd6fd3 PH |
2190 | for (nid = 0; nid < nr_node_ids; nid++) |
2191 | root_stable_tree[nid] = RB_ROOT; | |
2192 | ||
31dbd01f IE |
2193 | ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); |
2194 | if (IS_ERR(ksm_thread)) { | |
2195 | printk(KERN_ERR "ksm: creating kthread failed\n"); | |
2196 | err = PTR_ERR(ksm_thread); | |
d9f8984c | 2197 | goto out_free; |
31dbd01f IE |
2198 | } |
2199 | ||
2ffd8679 | 2200 | #ifdef CONFIG_SYSFS |
31dbd01f IE |
2201 | err = sysfs_create_group(mm_kobj, &ksm_attr_group); |
2202 | if (err) { | |
2203 | printk(KERN_ERR "ksm: register sysfs failed\n"); | |
2ffd8679 | 2204 | kthread_stop(ksm_thread); |
d9f8984c | 2205 | goto out_free; |
31dbd01f | 2206 | } |
c73602ad HD |
2207 | #else |
2208 | ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ | |
2209 | ||
2ffd8679 | 2210 | #endif /* CONFIG_SYSFS */ |
31dbd01f | 2211 | |
62b61f61 HD |
2212 | #ifdef CONFIG_MEMORY_HOTREMOVE |
2213 | /* | |
2214 | * Choose a high priority since the callback takes ksm_thread_mutex: | |
2215 | * later callbacks could only be taking locks which nest within that. | |
2216 | */ | |
2217 | hotplug_memory_notifier(ksm_memory_callback, 100); | |
2218 | #endif | |
31dbd01f IE |
2219 | return 0; |
2220 | ||
d9f8984c | 2221 | out_free: |
31dbd01f IE |
2222 | ksm_slab_free(); |
2223 | out: | |
2224 | return err; | |
f8af4da3 | 2225 | } |
31dbd01f | 2226 | module_init(ksm_init) |