/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif
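
/*
 * Illustrative note (not in the original file): with CONFIG_NUMA=n these
 * macros compile the node bookkeeping away entirely.  A call site such as
 *
 *	DO_NUMA(rmap_item->nid = nid);
 *	root = root_unstable_tree + NUMA(rmap_item->nid);
 *
 * reduces to "root = root_unstable_tree + 0" on non-NUMA builds, so there
 * is no nid field to maintain and no extra arithmetic at runtime.
 */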

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
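
/*
 * Illustrative sketch (not part of the original file): memory only becomes
 * a candidate for these trees after userspace opts it in with madvise(2),
 * which sets VM_MERGEABLE on the vma, and ksmd only scans while
 * /sys/kernel/mm/ksm/run is set to 1:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE))
 *		perror("madvise");
 */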

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct list_head list;
		};
	};
	struct hlist_head hlist;
	unsigned long kpfn;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
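
/*
 * Illustrative note (not in the original file): since the tracked virtual
 * address is page aligned, the low bits of rmap_item->address are free to
 * carry this state.  A hypothetical accessor makes the encoding explicit:
 *
 *	static inline unsigned long rmap_item_addr(struct rmap_item *item)
 *	{
 *		return item->address & PAGE_MASK;
 *	}
 *
 * An unstable rmap_item also stores the low byte of ksm_scan.seqnr under
 * SEQNR_MASK, which is how remove_rmap_item_from_tree() computes the
 * "age" of the item relative to the current scan.
 */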

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

/* Checksum of an empty (zeroed) page */
static unsigned int zero_checksum __read_mostly;

/* Whether to merge empty (zeroed) pages with actual zero pages */
static bool ksm_use_zero_pages __read_mostly;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
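
/*
 * Illustrative note (not in the original file): for example,
 * KSM_KMEM_CACHE(rmap_item, 0) expands to
 *
 *	kmem_cache_create("ksm_rmap_item", sizeof(struct rmap_item),
 *			  __alignof__(struct rmap_item), 0, NULL)
 *
 * so each slab cache is named after its structure and naturally aligned.
 */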

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
						__GFP_NORETRY | __GFP_NOWARN);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	/*
	 * The allocation can take too long with GFP_KERNEL when memory is under
	 * pressure, which may lead to hung task warnings.  Adding __GFP_HIGH
	 * grants access to memory reserves, helping to avoid this problem.
	 */
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}
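
/*
 * Illustrative note (not in the original file): the hash is keyed by the
 * mm_struct pointer itself, so a lookup and a later insert must use the
 * same mm.  A hypothetical caller pairs the two helpers like this:
 *
 *	mm_slot = get_mm_slot(mm);
 *	if (!mm_slot) {
 *		mm_slot = alloc_mm_slot();
 *		if (mm_slot)
 *			insert_to_mm_slots_hash(mm, mm_slot);
 *	}
 */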

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 *
 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
 * of the process that owns 'vma'.  We also do not want to enforce
 * protection keys here anyway.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr,
				FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * This helper is used for getting right index into array of tree roots.
 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
 * every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		rb_erase(&stable_node->node,
			 root_stable_tree + NUMA(stable_node->nid));
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page may still pass these checks briefly while it
 * is on its way to being freed; but it is an anomaly to bear in mind.
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)((unsigned long)stable_node |
					PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn);
	page = pfn_to_page(kpfn);

	/*
	 * page is computed from kpfn, so on most architectures reading
	 * page->mapping is naturally ordered after reading node->kpfn,
	 * but on Alpha we need to be more careful.
	 */
	smp_read_barrier_depends();
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_freeze_refs().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * but if page is swapcache in migrate_page_move_mapping(), it might
	 * still be our page, in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the freeze_refs section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (lock_it) {
		lock_page(page);
		if (READ_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (READ_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}
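
/*
 * Illustrative note (not in the original file): the "key" tested above is
 * the stable_node pointer tagged with PAGE_MAPPING_KSM in the low bits of
 * page->mapping - the same encoding PageKsm() relies on.  Decoding it is
 * simply:
 *
 *	stable_node = (void *)((unsigned long)page->mapping &
 *			       ~PAGE_MAPPING_FLAGS);
 *
 * (this mirrors what page_stable_node() does; shown only for orientation).
 */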

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, true);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (!hlist_empty(&stable_node->hlist))
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, true);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	if (WARN_ON_ONCE(page_mapped(page))) {
		/*
		 * This should not happen: but if it does, just refuse to let
		 * merge_across_nodes be switched - there is no need to panic.
		 */
		err = -EBUSY;
	} else {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node(stable_node)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
		up_read(&mm->mmap_sem);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			mmdrop(mm);
		} else
			spin_unlock(&ksm_mmlist_lock);
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}
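
/*
 * Illustrative note (not in the original file): the checksum is only a
 * cheap stability probe, never proof of equality - the trees are ordered
 * by full memcmp_pages().  The pattern in cmp_and_merge_page() is roughly:
 *
 *	checksum = calc_checksum(page);
 *	if (rmap_item->oldchecksum != checksum) {
 *		rmap_item->oldchecksum = checksum;
 *		return;		(page still volatile: try again next scan)
 *	}
 *
 * so a page must hash identically on two successive scans before KSM even
 * tries the unstable tree.
 */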

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
	};
	int swapped;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pvmw.address = page_address_in_vma(page, vma);
	if (pvmw.address == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmun_start = pvmw.address;
	mmun_end = pvmw.address + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	if (!page_vma_mapped_walk(&pvmw))
		goto out_mn;
	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
		goto out_unlock;

	if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
	    (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte))) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 */
		entry = ptep_clear_flush_notify(vma, pvmw.address, pvmw.pte);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);

		if (pte_protnone(entry))
			entry = pte_mkclean(pte_clear_savedwrite(entry));
		else
			entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
	}
	*orig_pte = *pvmw.pte;
	err = 0;

out_unlock:
	page_vma_mapped_walk_done(&pvmw);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}
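
/*
 * Illustrative note (not in the original file): for a page mapped by one
 * pte and pinned by nobody else, the count check above balances as
 *
 *	page_mapcount(page)		1, for our pte
 *	+ 1				the reference our caller holds
 *	+ swapped			one more if PageSwapCache
 *	== page_count(page)
 *
 * Any extra reference, e.g. one taken by get_user_pages_fast() for
 * O_DIRECT, breaks the equality and safely aborts the write-protect.
 */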

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	pte_t newpte;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;

	mmun_start = addr;
	mmun_end = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	/*
	 * No need to check ksm_use_zero_pages here: we can only have a
	 * zero_page here if ksm_use_zero_pages was enabled already.
	 */
	if (!is_zero_pfn(page_to_pfn(kpage))) {
		get_page(kpage);
		page_add_anon_rmap(kpage, vma, addr, false);
		newpte = mk_pte(kpage, vma->vm_page_prot);
	} else {
		newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
					       vma->vm_page_prot));
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);

	page_remove_rmap(page, false);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;

	if (PageTransCompound(page)) {
		if (split_huge_page(page))
			goto out_unlock;
	}

	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			/*
			 * Page reclaim just frees a clean page with no dirty
			 * ptes: make sure that the ksm page would be swapped.
			 */
			if (!PageDirty(page))
				SetPageDirty(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

out_unlock:
	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, rmap_item->address);
	if (!vma)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_sem */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
 */
static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
					   struct page *page,
					   struct rmap_item *tree_rmap_item,
					   struct page *tree_page)
{
	int err;

	err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
	if (!err) {
		err = try_to_merge_with_ksm_page(tree_rmap_item,
							tree_page, page);
		/*
		 * If that fails, we have a ksm page with only one pte
		 * pointing to it: so break it.
		 */
		if (err)
			break_cow(rmap_item);
	}
	return err ? NULL : page;
}
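
/*
 * Illustrative note (not in the original file): the merge is two-phased -
 * first "page" is upgraded in place to a ksm page (kpage == NULL), then
 * "tree_page" is remapped onto it:
 *
 *	try_to_merge_with_ksm_page(rmap_item, page, NULL);
 *	try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page);
 *
 * If the second phase fails, break_cow() undoes the first, so no ksm page
 * with only a single pte pointing at it is left behind.
 */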

/*
 * stable_tree_search - search for page inside the stable tree
 *
 * This function checks if there is a page inside the stable tree
 * with identical content to the page that we are scanning right now.
 *
 * This function returns the stable tree node of identical content if found,
 * NULL otherwise.
 */
static struct page *stable_tree_search(struct page *page)
{
	int nid;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node;
	struct stable_node *page_node;

	page_node = page_stable_node(page);
	if (page_node && page_node->head != &migrate_nodes) {
		/* ksm page forked */
		get_page(page);
		return page;
	}

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_stable_tree + nid;
again:
	new = &root->rb_node;
	parent = NULL;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(page, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * Lock and unlock the stable_node's page (which
			 * might already have been migrated) so that page
			 * migration is sure to notice its raised count.
			 * It would be more elegant to return stable_node
			 * than kpage, but that involves more changes.
			 */
			tree_page = get_ksm_page(stable_node, true);
			if (tree_page) {
				unlock_page(tree_page);
				if (get_kpfn_nid(stable_node->kpfn) !=
						NUMA(stable_node->nid)) {
					put_page(tree_page);
					goto replace;
				}
				return tree_page;
			}
			/*
			 * There is now a place for page_node, but the tree may
			 * have been rebalanced, so re-evaluate parent and new.
			 */
			if (page_node)
				goto again;
			return NULL;
		}
	}

	if (!page_node)
		return NULL;

	list_del(&page_node->list);
	DO_NUMA(page_node->nid = nid);
	rb_link_node(&page_node->node, parent, new);
	rb_insert_color(&page_node->node, root);
	get_page(page);
	return page;

replace:
	if (page_node) {
		list_del(&page_node->list);
		DO_NUMA(page_node->nid = nid);
		rb_replace_node(&stable_node->node, &page_node->node, root);
		get_page(page);
	} else {
		rb_erase(&stable_node->node, root);
		page = NULL;
	}
	stable_node->head = &migrate_nodes;
	list_add(&stable_node->list, stable_node->head);
	return page;
}

/*
 * stable_tree_insert - insert stable tree node pointing to new ksm page
 * into the stable tree.
 *
 * This function returns the stable tree node just allocated on success,
 * NULL otherwise.
 */
static struct stable_node *stable_tree_insert(struct page *kpage)
{
	int nid;
	unsigned long kpfn;
	struct rb_root *root;
	struct rb_node **new;
	struct rb_node *parent;
	struct stable_node *stable_node;

	kpfn = page_to_pfn(kpage);
	nid = get_kpfn_nid(kpfn);
	root = root_stable_tree + nid;
again:
	parent = NULL;
	new = &root->rb_node;

	while (*new) {
		struct page *tree_page;
		int ret;

		cond_resched();
		stable_node = rb_entry(*new, struct stable_node, node);
		tree_page = get_ksm_page(stable_node, false);
		if (!tree_page) {
			/*
			 * If we walked over a stale stable_node,
			 * get_ksm_page() will call rb_erase() and it
			 * may rebalance the tree from under us. So
			 * restart the search from scratch. Returning
			 * NULL would be safe too, but we'd generate
			 * false negative insertions just because some
			 * stable_node was stale.
			 */
			goto again;
		}

		ret = memcmp_pages(kpage, tree_page);
		put_page(tree_page);

		parent = *new;
		if (ret < 0)
			new = &parent->rb_left;
		else if (ret > 0)
			new = &parent->rb_right;
		else {
			/*
			 * It is not a bug that stable_tree_search() didn't
			 * find this node: because at that time our page was
			 * not yet write-protected, so may have changed since.
			 */
			return NULL;
		}
	}

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	INIT_HLIST_HEAD(&stable_node->hlist);
	stable_node->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node);
	DO_NUMA(stable_node->nid = nid);
	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, root);

	return stable_node;
}

/*
 * unstable_tree_search_insert - search for identical page,
 * else insert rmap_item into the unstable tree.
 *
 * This function searches for a page in the unstable tree identical to the
 * page currently being scanned; and if no identical page is found in the
 * tree, we insert rmap_item as a new object into the unstable tree.
 *
 * This function returns pointer to rmap_item found to be identical
 * to the currently scanned page, NULL otherwise.
 *
 * This function does both searching and inserting, because they share
 * the same walking algorithm in an rbtree.
 */
static
struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
					      struct page *page,
					      struct page **tree_pagep)
{
	struct rb_node **new;
	struct rb_root *root;
	struct rb_node *parent = NULL;
	int nid;

	nid = get_kpfn_nid(page_to_pfn(page));
	root = root_unstable_tree + nid;
	new = &root->rb_node;

	while (*new) {
		struct rmap_item *tree_rmap_item;
		struct page *tree_page;
		int ret;

		cond_resched();
		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
		tree_page = get_mergeable_page(tree_rmap_item);
		if (!tree_page)
			return NULL;

		/*
		 * Don't substitute a ksm page for a forked page.
		 */
		if (page == tree_page) {
			put_page(tree_page);
			return NULL;
		}

		ret = memcmp_pages(page, tree_page);

		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else if (!ksm_merge_across_nodes &&
			   page_to_nid(tree_page) != nid) {
			/*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put in the right unstable
			 * tree next time: only merge with it when across_nodes.
			 */
			put_page(tree_page);
			return NULL;
		} else {
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}
	}

	rmap_item->address |= UNSTABLE_FLAG;
	rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
	DO_NUMA(rmap_item->nid = nid);
	rb_link_node(&rmap_item->node, parent, new);
	rb_insert_color(&rmap_item->node, root);

	ksm_pages_unshared++;
	return NULL;
}
1408 | ||
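/*
 * Added commentary: the low bits of rmap_item->address double as
 * flag/sequence space, so after the insertion above the field holds,
 * schematically:
 *
 *      (vaddr & PAGE_MASK) | UNSTABLE_FLAG | (ksm_scan.seqnr & SEQNR_MASK)
 *
 * The stashed seqnr lets the removal path tell whether this node was
 * linked into the current scan's unstable tree (and so still needs
 * rb_erase) or into an earlier tree that has already been discarded
 * wholesale by the per-scan RB_ROOT reset.
 */
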
/*
 * stable_tree_append - add another rmap_item to the linked list of
 * rmap_items hanging off a given node of the stable tree, all sharing
 * the same ksm page.
 */
static void stable_tree_append(struct rmap_item *rmap_item,
                               struct stable_node *stable_node)
{
        rmap_item->head = stable_node;
        rmap_item->address |= STABLE_FLAG;
        hlist_add_head(&rmap_item->hlist, &stable_node->hlist);

        if (rmap_item->hlist.next)
                ksm_pages_sharing++;
        else
                ksm_pages_shared++;
}

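/*
 * Worked example (added commentary): if 500 identical anonymous pages
 * are merged into one ksm page, the first rmap_item appended here bumps
 * ksm_pages_shared to 1 and the remaining 499 each bump
 * ksm_pages_sharing: pages_shared counts ksm pages, pages_sharing
 * counts the extra mappings, i.e. the memory actually saved.
 */
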
/*
 * cmp_and_merge_page - first see if page can be merged into the stable tree;
 * if not, compare checksum to previous and if it's the same, see if page can
 * be inserted into the unstable tree, or merged with a page already there and
 * both transferred to the stable tree.
 *
 * @page: the page that we are searching an identical page for.
 * @rmap_item: the reverse mapping into the virtual address of this page
 */
static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
{
        struct rmap_item *tree_rmap_item;
        struct page *tree_page = NULL;
        struct stable_node *stable_node;
        struct page *kpage;
        unsigned int checksum;
        int err;

        stable_node = page_stable_node(page);
        if (stable_node) {
                if (stable_node->head != &migrate_nodes &&
                    get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) {
                        rb_erase(&stable_node->node,
                                 root_stable_tree + NUMA(stable_node->nid));
                        stable_node->head = &migrate_nodes;
                        list_add(&stable_node->list, stable_node->head);
                }
                if (stable_node->head != &migrate_nodes &&
                    rmap_item->head == stable_node)
                        return;
        }

        /* We first start with searching the page inside the stable tree */
        kpage = stable_tree_search(page);
        if (kpage == page && rmap_item->head == stable_node) {
                put_page(kpage);
                return;
        }

        remove_rmap_item_from_tree(rmap_item);

        if (kpage) {
                err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
                if (!err) {
                        /*
                         * The page was successfully merged:
                         * add its rmap_item to the stable tree.
                         */
                        lock_page(kpage);
                        stable_tree_append(rmap_item, page_stable_node(kpage));
                        unlock_page(kpage);
                }
                put_page(kpage);
                return;
        }

        /*
         * If the hash value of the page has changed from the last time
         * we calculated it, this page is changing frequently: therefore we
         * don't want to insert it in the unstable tree, and we don't want
         * to waste our time searching for something identical to it there.
         */
        checksum = calc_checksum(page);
        if (rmap_item->oldchecksum != checksum) {
                rmap_item->oldchecksum = checksum;
                return;
        }

        /*
         * Same checksum as an empty page. We attempt to merge it with the
         * appropriate zero page if the user enabled this via sysfs.
         */
        if (ksm_use_zero_pages && (checksum == zero_checksum)) {
                struct vm_area_struct *vma;

                vma = find_mergeable_vma(rmap_item->mm, rmap_item->address);
                err = try_to_merge_one_page(vma, page,
                                            ZERO_PAGE(rmap_item->address));
                /*
                 * In case of failure, the page was not really empty, so we
                 * need to continue. Otherwise we're done.
                 */
                if (!err)
                        return;
        }
        tree_rmap_item =
                unstable_tree_search_insert(rmap_item, page, &tree_page);
        if (tree_rmap_item) {
                kpage = try_to_merge_two_pages(rmap_item, page,
                                               tree_rmap_item, tree_page);
                put_page(tree_page);
                if (kpage) {
                        /*
                         * The pages were successfully merged: insert new
                         * node in the stable tree and add both rmap_items.
                         */
                        lock_page(kpage);
                        stable_node = stable_tree_insert(kpage);
                        if (stable_node) {
                                stable_tree_append(tree_rmap_item, stable_node);
                                stable_tree_append(rmap_item, stable_node);
                        }
                        unlock_page(kpage);

                        /*
                         * If we fail to insert the page into the stable tree,
                         * we will have 2 virtual addresses that are pointing
                         * to a ksm page left outside the stable tree,
                         * in which case we need to break_cow on both.
                         */
                        if (!stable_node) {
                                break_cow(tree_rmap_item);
                                break_cow(rmap_item);
                        }
                }
        }
}

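/*
 * Added commentary: calc_checksum() above is a fast hash of the page
 * (jhash2 in this file), used only as a volatility filter, never for
 * matching.  A page must show the same checksum on two successive
 * encounters before it may enter the unstable tree; e.g. a page holding
 * a counter that ticks between full scans keeps failing the test and is
 * skipped cheaply, while real matching is always done by memcmp_pages().
 */
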
static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
                                            struct rmap_item **rmap_list,
                                            unsigned long addr)
{
        struct rmap_item *rmap_item;

        while (*rmap_list) {
                rmap_item = *rmap_list;
                if ((rmap_item->address & PAGE_MASK) == addr)
                        return rmap_item;
                if (rmap_item->address > addr)
                        break;
                *rmap_list = rmap_item->rmap_list;
                remove_rmap_item_from_tree(rmap_item);
                free_rmap_item(rmap_item);
        }

        rmap_item = alloc_rmap_item();
        if (rmap_item) {
                /* It has already been zeroed */
                rmap_item->mm = mm_slot->mm;
                rmap_item->address = addr;
                rmap_item->rmap_list = *rmap_list;
                *rmap_list = rmap_item;
        }
        return rmap_item;
}

static struct rmap_item *scan_get_next_rmap_item(struct page **page)
{
        struct mm_struct *mm;
        struct mm_slot *slot;
        struct vm_area_struct *vma;
        struct rmap_item *rmap_item;
        int nid;

        if (list_empty(&ksm_mm_head.mm_list))
                return NULL;

        slot = ksm_scan.mm_slot;
        if (slot == &ksm_mm_head) {
                /*
                 * A number of pages can hang around indefinitely on per-cpu
                 * pagevecs, raised page count preventing write_protect_page
                 * from merging them.  Though it doesn't really matter much,
                 * it is puzzling to see some stuck in pages_volatile until
                 * other activity jostles them out, and they also prevented
                 * LTP's KSM test from succeeding deterministically; so drain
                 * them here (here rather than on entry to ksm_do_scan(),
                 * so we don't IPI too often when pages_to_scan is set low).
                 */
                lru_add_drain_all();

                /*
                 * Whereas stale stable_nodes on the stable_tree itself
                 * get pruned in the regular course of stable_tree_search(),
                 * those moved out to the migrate_nodes list can accumulate:
                 * so prune them once before each full scan.
                 */
                if (!ksm_merge_across_nodes) {
                        struct stable_node *stable_node, *next;
                        struct page *page;

                        list_for_each_entry_safe(stable_node, next,
                                                 &migrate_nodes, list) {
                                page = get_ksm_page(stable_node, false);
                                if (page)
                                        put_page(page);
                                cond_resched();
                        }
                }

                for (nid = 0; nid < ksm_nr_node_ids; nid++)
                        root_unstable_tree[nid] = RB_ROOT;

                spin_lock(&ksm_mmlist_lock);
                slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
                ksm_scan.mm_slot = slot;
                spin_unlock(&ksm_mmlist_lock);
                /*
                 * Although we tested list_empty() above, a racing __ksm_exit
                 * of the last mm on the list may have removed it since then.
                 */
                if (slot == &ksm_mm_head)
                        return NULL;
next_mm:
                ksm_scan.address = 0;
                ksm_scan.rmap_list = &slot->rmap_list;
        }

        mm = slot->mm;
        down_read(&mm->mmap_sem);
        if (ksm_test_exit(mm))
                vma = NULL;
        else
                vma = find_vma(mm, ksm_scan.address);

        for (; vma; vma = vma->vm_next) {
                if (!(vma->vm_flags & VM_MERGEABLE))
                        continue;
                if (ksm_scan.address < vma->vm_start)
                        ksm_scan.address = vma->vm_start;
                if (!vma->anon_vma)
                        ksm_scan.address = vma->vm_end;

                while (ksm_scan.address < vma->vm_end) {
                        if (ksm_test_exit(mm))
                                break;
                        *page = follow_page(vma, ksm_scan.address, FOLL_GET);
                        if (IS_ERR_OR_NULL(*page)) {
                                ksm_scan.address += PAGE_SIZE;
                                cond_resched();
                                continue;
                        }
                        if (PageAnon(*page)) {
                                flush_anon_page(vma, *page, ksm_scan.address);
                                flush_dcache_page(*page);
                                rmap_item = get_next_rmap_item(slot,
                                        ksm_scan.rmap_list, ksm_scan.address);
                                if (rmap_item) {
                                        ksm_scan.rmap_list =
                                                        &rmap_item->rmap_list;
                                        ksm_scan.address += PAGE_SIZE;
                                } else
                                        put_page(*page);
                                up_read(&mm->mmap_sem);
                                return rmap_item;
                        }
                        put_page(*page);
                        ksm_scan.address += PAGE_SIZE;
                        cond_resched();
                }
        }

        if (ksm_test_exit(mm)) {
                ksm_scan.address = 0;
                ksm_scan.rmap_list = &slot->rmap_list;
        }
        /*
         * Nuke all the rmap_items that are above this current rmap:
         * because there were no VM_MERGEABLE vmas with such addresses.
         */
        remove_trailing_rmap_items(slot, ksm_scan.rmap_list);

        spin_lock(&ksm_mmlist_lock);
        ksm_scan.mm_slot = list_entry(slot->mm_list.next,
                                      struct mm_slot, mm_list);
        if (ksm_scan.address == 0) {
                /*
                 * We've completed a full scan of all vmas, holding mmap_sem
                 * throughout, and found no VM_MERGEABLE: so do the same as
                 * __ksm_exit does to remove this mm from all our lists now.
                 * This applies either when cleaning up after __ksm_exit
                 * (but beware: we can reach here even before __ksm_exit),
                 * or when all VM_MERGEABLE areas have been unmapped (and
                 * mmap_sem then protects against race with MADV_MERGEABLE).
                 */
                hash_del(&slot->link);
                list_del(&slot->mm_list);
                spin_unlock(&ksm_mmlist_lock);

                free_mm_slot(slot);
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                up_read(&mm->mmap_sem);
                mmdrop(mm);
        } else {
                up_read(&mm->mmap_sem);
                /*
                 * up_read(&mm->mmap_sem) first because after
                 * spin_unlock(&ksm_mmlist_lock) run, the "mm" may
                 * already have been freed under us by __ksm_exit()
                 * because the "mm_slot" is still hashed and
                 * ksm_scan.mm_slot doesn't point to it anymore.
                 */
                spin_unlock(&ksm_mmlist_lock);
        }

        /* Repeat until we've completed scanning the whole list */
        slot = ksm_scan.mm_slot;
        if (slot != &ksm_mm_head)
                goto next_mm;

        ksm_scan.seqnr++;
        return NULL;
}

/**
 * ksm_do_scan - the ksm scanner main worker function.
 * @scan_npages: number of pages we want to scan before we return.
 */
static void ksm_do_scan(unsigned int scan_npages)
{
        struct rmap_item *rmap_item;
        struct page *uninitialized_var(page);

        while (scan_npages-- && likely(!freezing(current))) {
                cond_resched();
                rmap_item = scan_get_next_rmap_item(&page);
                if (!rmap_item)
                        return;
                cmp_and_merge_page(page, rmap_item);
                put_page(page);
        }
}

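/*
 * Throughput sketch (added commentary; the defaults are set with the
 * tunables earlier in this file and may differ): with pages_to_scan=100
 * and sleep_millisecs=20, ksmd examines about 100 / 0.02s = 5000 pages
 * per second between sleeps, i.e. roughly 20 MB/s of 4 KiB pages.
 */
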
static int ksmd_should_run(void)
{
        return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
}

static int ksm_scan_thread(void *nothing)
{
        set_freezable();
        set_user_nice(current, 5);

        while (!kthread_should_stop()) {
                mutex_lock(&ksm_thread_mutex);
                wait_while_offlining();
                if (ksmd_should_run())
                        ksm_do_scan(ksm_thread_pages_to_scan);
                mutex_unlock(&ksm_thread_mutex);

                try_to_freeze();

                if (ksmd_should_run()) {
                        schedule_timeout_interruptible(
                                msecs_to_jiffies(ksm_thread_sleep_millisecs));
                } else {
                        wait_event_freezable(ksm_thread_wait,
                                ksmd_should_run() || kthread_should_stop());
                }
        }
        return 0;
}

int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                unsigned long end, int advice, unsigned long *vm_flags)
{
        struct mm_struct *mm = vma->vm_mm;
        int err;

        switch (advice) {
        case MADV_MERGEABLE:
                /*
                 * Be somewhat over-protective for now!
                 */
                if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
                                 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
                                 VM_HUGETLB | VM_MIXEDMAP))
                        return 0;               /* just ignore the advice */

#ifdef VM_SAO
                if (*vm_flags & VM_SAO)
                        return 0;
#endif

                if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
                        err = __ksm_enter(mm);
                        if (err)
                                return err;
                }

                *vm_flags |= VM_MERGEABLE;
                break;

        case MADV_UNMERGEABLE:
                if (!(*vm_flags & VM_MERGEABLE))
                        return 0;               /* just ignore the advice */

                if (vma->anon_vma) {
                        err = unmerge_ksm_pages(vma, start, end);
                        if (err)
                                return err;
                }

                *vm_flags &= ~VM_MERGEABLE;
                break;
        }

        return 0;
}

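/*
 * Usage sketch (added commentary, userspace side; "len" is a
 * hypothetical length): ksm_madvise() above is reached via madvise(2):
 *
 *      void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *      if (madvise(buf, len, MADV_MERGEABLE))
 *              perror("madvise");
 *
 * Merging additionally needs CONFIG_KSM and ksmd running (run=1 in
 * sysfs below); MADV_UNMERGEABLE withdraws the hint, breaking COW on
 * any pages already merged.
 */
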
int __ksm_enter(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int needs_wakeup;

        mm_slot = alloc_mm_slot();
        if (!mm_slot)
                return -ENOMEM;

        /* Check ksm_run too?  Would need tighter locking */
        needs_wakeup = list_empty(&ksm_mm_head.mm_list);

        spin_lock(&ksm_mmlist_lock);
        insert_to_mm_slots_hash(mm, mm_slot);
        /*
         * When KSM_RUN_MERGE (or KSM_RUN_STOP),
         * insert just behind the scanning cursor, to let the area settle
         * down a little; when fork is followed by immediate exec, we don't
         * want ksmd to waste time setting up and tearing down an rmap_list.
         *
         * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
         * scanning cursor, otherwise KSM pages in newly forked mms will be
         * missed: then we might as well insert at the end of the list.
         */
        if (ksm_run & KSM_RUN_UNMERGE)
                list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
        else
                list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
        spin_unlock(&ksm_mmlist_lock);

        set_bit(MMF_VM_MERGEABLE, &mm->flags);
        mmgrab(mm);

        if (needs_wakeup)
                wake_up_interruptible(&ksm_thread_wait);

        return 0;
}

void __ksm_exit(struct mm_struct *mm)
{
        struct mm_slot *mm_slot;
        int easy_to_free = 0;

        /*
         * This process is exiting: if it's straightforward (as is the
         * case when ksmd was never running), free mm_slot immediately.
         * But if it's at the cursor or has rmap_items linked to it, use
         * mmap_sem to synchronize with any break_cows before pagetables
         * are freed, and leave the mm_slot on the list for ksmd to free.
         * Beware: ksm may already have noticed it exiting and freed the slot.
         */

        spin_lock(&ksm_mmlist_lock);
        mm_slot = get_mm_slot(mm);
        if (mm_slot && ksm_scan.mm_slot != mm_slot) {
                if (!mm_slot->rmap_list) {
                        hash_del(&mm_slot->link);
                        list_del(&mm_slot->mm_list);
                        easy_to_free = 1;
                } else {
                        list_move(&mm_slot->mm_list,
                                  &ksm_scan.mm_slot->mm_list);
                }
        }
        spin_unlock(&ksm_mmlist_lock);

        if (easy_to_free) {
                free_mm_slot(mm_slot);
                clear_bit(MMF_VM_MERGEABLE, &mm->flags);
                mmdrop(mm);
        } else if (mm_slot) {
                down_write(&mm->mmap_sem);
                up_write(&mm->mmap_sem);
        }
}

struct page *ksm_might_need_to_copy(struct page *page,
                        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = page_anon_vma(page);
        struct page *new_page;

        if (PageKsm(page)) {
                if (page_stable_node(page) &&
                    !(ksm_run & KSM_RUN_UNMERGE))
                        return page;    /* no need to copy it */
        } else if (!anon_vma) {
                return page;            /* no need to copy it */
        } else if (anon_vma->root == vma->anon_vma->root &&
                   page->index == linear_page_index(vma, address)) {
                return page;            /* still no need to copy it */
        }
        if (!PageUptodate(page))
                return page;            /* let do_swap_page report the error */

        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
                copy_user_highpage(new_page, page, address, vma);

                SetPageDirty(new_page);
                __SetPageUptodate(new_page);
                __SetPageLocked(new_page);
        }

        return new_page;
}

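/*
 * Added commentary: the expected callers here are the swap-in paths,
 * where a page found in the swap cache may still be a ksm page shared
 * with other mms (and being unmerged), or an anon page belonging to a
 * different mapping of the same swap entry; copying gives this vma a
 * page of its own to map.
 */
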
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
{
        struct stable_node *stable_node;
        struct rmap_item *rmap_item;
        int search_new_forks = 0;

        VM_BUG_ON_PAGE(!PageKsm(page), page);

        /*
         * Rely on the page lock to protect against concurrent modifications
         * to that page's node of the stable tree.
         */
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        stable_node = page_stable_node(page);
        if (!stable_node)
                return;
again:
        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;

                cond_resched();
                anon_vma_lock_read(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
                        cond_resched();
                        vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
                        /*
                         * Initially we examine only the vma which covers this
                         * rmap_item; but later, if there is still work to do,
                         * we examine covering vmas in other mms: in case they
                         * were forked from the original since ksmd passed.
                         */
                        if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
                                continue;

                        if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
                                continue;

                        if (!rwc->rmap_one(page, vma,
                                        rmap_item->address, rwc->arg)) {
                                anon_vma_unlock_read(anon_vma);
                                return;
                        }
                        if (rwc->done && rwc->done(page)) {
                                anon_vma_unlock_read(anon_vma);
                                return;
                        }
                }
                anon_vma_unlock_read(anon_vma);
        }
        if (!search_new_forks++)
                goto again;
}

#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
        struct stable_node *stable_node;

        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
        VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);

        stable_node = page_stable_node(newpage);
        if (stable_node) {
                VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
                stable_node->kpfn = page_to_pfn(newpage);
                /*
                 * newpage->mapping was set in advance; now we need smp_wmb()
                 * to make sure that the new stable_node->kpfn is visible
                 * to get_ksm_page() before it can see that oldpage->mapping
                 * has gone stale (or that PageSwapCache has been cleared).
                 */
                smp_wmb();
                set_page_stable_node(oldpage, NULL);
        }
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_HOTREMOVE
static void wait_while_offlining(void)
{
        while (ksm_run & KSM_RUN_OFFLINE) {
                mutex_unlock(&ksm_thread_mutex);
                wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
                            TASK_UNINTERRUPTIBLE);
                mutex_lock(&ksm_thread_mutex);
        }
}

static void ksm_check_stable_tree(unsigned long start_pfn,
                                  unsigned long end_pfn)
{
        struct stable_node *stable_node, *next;
        struct rb_node *node;
        int nid;

        for (nid = 0; nid < ksm_nr_node_ids; nid++) {
                node = rb_first(root_stable_tree + nid);
                while (node) {
                        stable_node = rb_entry(node, struct stable_node, node);
                        if (stable_node->kpfn >= start_pfn &&
                            stable_node->kpfn < end_pfn) {
                                /*
                                 * Don't get_ksm_page, page has already gone:
                                 * which is why we keep kpfn instead of page*
                                 */
                                remove_node_from_stable_tree(stable_node);
                                node = rb_first(root_stable_tree + nid);
                        } else
                                node = rb_next(node);
                        cond_resched();
                }
        }
        list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
                if (stable_node->kpfn >= start_pfn &&
                    stable_node->kpfn < end_pfn)
                        remove_node_from_stable_tree(stable_node);
                cond_resched();
        }
}

static int ksm_memory_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;

        switch (action) {
        case MEM_GOING_OFFLINE:
                /*
                 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
                 * and remove_all_stable_nodes() while memory is going offline:
                 * it is unsafe for them to touch the stable tree at this time.
                 * But unmerge_ksm_pages(), rmap lookups and other entry points
                 * which do not need the ksm_thread_mutex are all safe.
                 */
                mutex_lock(&ksm_thread_mutex);
                ksm_run |= KSM_RUN_OFFLINE;
                mutex_unlock(&ksm_thread_mutex);
                break;

        case MEM_OFFLINE:
                /*
                 * Most of the work is done by page migration; but there might
                 * be a few stable_nodes left over, still pointing to struct
                 * pages which have been offlined: prune those from the tree,
                 * otherwise get_ksm_page() might later try to access a
                 * non-existent struct page.
                 */
                ksm_check_stable_tree(mn->start_pfn,
                                      mn->start_pfn + mn->nr_pages);
                /* fallthrough */

        case MEM_CANCEL_OFFLINE:
                mutex_lock(&ksm_thread_mutex);
                ksm_run &= ~KSM_RUN_OFFLINE;
                mutex_unlock(&ksm_thread_mutex);

                smp_mb();       /* wake_up_bit advises this */
                wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
                break;
        }
        return NOTIFY_OK;
}
#else
static void wait_while_offlining(void)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SYSFS
/*
 * This all compiles without CONFIG_SYSFS, but is a waste of space.
 */

#define KSM_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KSM_ATTR(_name) \
        static struct kobj_attribute _name##_attr = \
                __ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t sleep_millisecs_show(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
}

static ssize_t sleep_millisecs_store(struct kobject *kobj,
                                     struct kobj_attribute *attr,
                                     const char *buf, size_t count)
{
        unsigned long msecs;
        int err;

        err = kstrtoul(buf, 10, &msecs);
        if (err || msecs > UINT_MAX)
                return -EINVAL;

        ksm_thread_sleep_millisecs = msecs;

        return count;
}
KSM_ATTR(sleep_millisecs);

static ssize_t pages_to_scan_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count)
{
        int err;
        unsigned long nr_pages;

        err = kstrtoul(buf, 10, &nr_pages);
        if (err || nr_pages > UINT_MAX)
                return -EINVAL;

        ksm_thread_pages_to_scan = nr_pages;

        return count;
}
KSM_ATTR(pages_to_scan);

static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%lu\n", ksm_run);
}

static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
{
        int err;
        unsigned long flags;

        err = kstrtoul(buf, 10, &flags);
        if (err || flags > UINT_MAX)
                return -EINVAL;
        if (flags > KSM_RUN_UNMERGE)
                return -EINVAL;

        /*
         * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
         * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
         * breaking COW to free the pages_shared (but leaves mm_slots
         * on the list for when ksmd may be set running again).
         */

        mutex_lock(&ksm_thread_mutex);
        wait_while_offlining();
        if (ksm_run != flags) {
                ksm_run = flags;
                if (flags & KSM_RUN_UNMERGE) {
                        set_current_oom_origin();
                        err = unmerge_and_remove_all_rmap_items();
                        clear_current_oom_origin();
                        if (err) {
                                ksm_run = KSM_RUN_STOP;
                                count = err;
                        }
                }
        }
        mutex_unlock(&ksm_thread_mutex);

        if (flags & KSM_RUN_MERGE)
                wake_up_interruptible(&ksm_thread_wait);

        return count;
}
KSM_ATTR(run);

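/*
 * Usage sketch (added commentary): the attributes above are grouped
 * under /sys/kernel/mm/ksm/ by ksm_init(), so a typical root shell
 * session looks like:
 *
 *      echo 200 > /sys/kernel/mm/ksm/pages_to_scan
 *      echo 20 > /sys/kernel/mm/ksm/sleep_millisecs
 *      echo 1 > /sys/kernel/mm/ksm/run         # start ksmd
 *      cat /sys/kernel/mm/ksm/pages_sharing    # mappings deduplicated
 *      echo 2 > /sys/kernel/mm/ksm/run         # stop and unmerge all
 */
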
#ifdef CONFIG_NUMA
static ssize_t merge_across_nodes_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", ksm_merge_across_nodes);
}

static ssize_t merge_across_nodes_store(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        const char *buf, size_t count)
{
        int err;
        unsigned long knob;

        err = kstrtoul(buf, 10, &knob);
        if (err)
                return err;
        if (knob > 1)
                return -EINVAL;

        mutex_lock(&ksm_thread_mutex);
        wait_while_offlining();
        if (ksm_merge_across_nodes != knob) {
                if (ksm_pages_shared || remove_all_stable_nodes())
                        err = -EBUSY;
                else if (root_stable_tree == one_stable_tree) {
                        struct rb_root *buf;
                        /*
                         * This is the first time that we switch away from the
                         * default of merging across nodes: must now allocate
                         * a buffer to hold as many roots as may be needed.
                         * Allocate stable and unstable together:
                         * MAXSMP NODES_SHIFT 10 will use 16kB.
                         */
                        buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
                                      GFP_KERNEL);
                        /* Let us assume that RB_ROOT, being NULL, is all zeroes */
                        if (!buf)
                                err = -ENOMEM;
                        else {
                                root_stable_tree = buf;
                                root_unstable_tree = buf + nr_node_ids;
                                /* Stable tree is empty but not the unstable */
                                root_unstable_tree[0] = one_unstable_tree[0];
                        }
                }
                if (!err) {
                        ksm_merge_across_nodes = knob;
                        ksm_nr_node_ids = knob ? 1 : nr_node_ids;
                }
        }
        mutex_unlock(&ksm_thread_mutex);

        return err ? err : count;
}
KSM_ATTR(merge_across_nodes);
#endif

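/*
 * Usage note (added commentary): merge_across_nodes can only be changed
 * while nothing is merged, otherwise the store above returns -EBUSY,
 * since existing stable tree nodes would sit on the wrong per-node tree:
 *
 *      echo 2 > /sys/kernel/mm/ksm/run         # unmerge everything first
 *      echo 0 > /sys/kernel/mm/ksm/merge_across_nodes
 *      echo 1 > /sys/kernel/mm/ksm/run
 */
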
static ssize_t use_zero_pages_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", ksm_use_zero_pages);
}
static ssize_t use_zero_pages_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        int err;
        bool value;

        err = kstrtobool(buf, &value);
        if (err)
                return -EINVAL;

        ksm_use_zero_pages = value;

        return count;
}
KSM_ATTR(use_zero_pages);

static ssize_t pages_shared_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", ksm_pages_shared);
}
KSM_ATTR_RO(pages_shared);

static ssize_t pages_sharing_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", ksm_pages_sharing);
}
KSM_ATTR_RO(pages_sharing);

static ssize_t pages_unshared_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", ksm_pages_unshared);
}
KSM_ATTR_RO(pages_unshared);

static ssize_t pages_volatile_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        long ksm_pages_volatile;

        ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
                                - ksm_pages_sharing - ksm_pages_unshared;
        /*
         * It was not worth any locking to calculate that statistic,
         * but it might therefore sometimes be negative: conceal that.
         */
        if (ksm_pages_volatile < 0)
                ksm_pages_volatile = 0;
        return sprintf(buf, "%ld\n", ksm_pages_volatile);
}
KSM_ATTR_RO(pages_volatile);

static ssize_t full_scans_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%lu\n", ksm_scan.seqnr);
}
KSM_ATTR_RO(full_scans);

static struct attribute *ksm_attrs[] = {
        &sleep_millisecs_attr.attr,
        &pages_to_scan_attr.attr,
        &run_attr.attr,
        &pages_shared_attr.attr,
        &pages_sharing_attr.attr,
        &pages_unshared_attr.attr,
        &pages_volatile_attr.attr,
        &full_scans_attr.attr,
#ifdef CONFIG_NUMA
        &merge_across_nodes_attr.attr,
#endif
        &use_zero_pages_attr.attr,
        NULL,
};

static struct attribute_group ksm_attr_group = {
        .attrs = ksm_attrs,
        .name = "ksm",
};
#endif /* CONFIG_SYSFS */

static int __init ksm_init(void)
{
        struct task_struct *ksm_thread;
        int err;

        /* The correct value depends on page size and endianness */
        zero_checksum = calc_checksum(ZERO_PAGE(0));
        /* Default to false for backwards compatibility */
        ksm_use_zero_pages = false;

        err = ksm_slab_init();
        if (err)
                goto out;

        ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
        if (IS_ERR(ksm_thread)) {
                pr_err("ksm: creating kthread failed\n");
                err = PTR_ERR(ksm_thread);
                goto out_free;
        }

#ifdef CONFIG_SYSFS
        err = sysfs_create_group(mm_kobj, &ksm_attr_group);
        if (err) {
                pr_err("ksm: register sysfs failed\n");
                kthread_stop(ksm_thread);
                goto out_free;
        }
#else
        ksm_run = KSM_RUN_MERGE;        /* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
        /* There is no significance to this priority 100 */
        hotplug_memory_notifier(ksm_memory_callback, 100);
#endif
        return 0;

out_free:
        ksm_slab_free();
out:
        return err;
}
subsys_initcall(ksm_init);