mm/khugepaged.c
1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3 #include <linux/mm.h>
4 #include <linux/sched.h>
5 #include <linux/sched/mm.h>
6 #include <linux/sched/coredump.h>
7 #include <linux/mmu_notifier.h>
8 #include <linux/rmap.h>
9 #include <linux/swap.h>
10 #include <linux/mm_inline.h>
11 #include <linux/kthread.h>
12 #include <linux/khugepaged.h>
13 #include <linux/freezer.h>
14 #include <linux/mman.h>
15 #include <linux/hashtable.h>
16 #include <linux/userfaultfd_k.h>
17 #include <linux/page_idle.h>
18 #include <linux/swapops.h>
19 #include <linux/shmem_fs.h>
20
21 #include <asm/tlb.h>
22 #include <asm/pgalloc.h>
23 #include "internal.h"
24
25 enum scan_result {
26 SCAN_FAIL,
27 SCAN_SUCCEED,
28 SCAN_PMD_NULL,
29 SCAN_EXCEED_NONE_PTE,
30 SCAN_PTE_NON_PRESENT,
31 SCAN_PAGE_RO,
32 SCAN_LACK_REFERENCED_PAGE,
33 SCAN_PAGE_NULL,
34 SCAN_SCAN_ABORT,
35 SCAN_PAGE_COUNT,
36 SCAN_PAGE_LRU,
37 SCAN_PAGE_LOCK,
38 SCAN_PAGE_ANON,
39 SCAN_PAGE_COMPOUND,
40 SCAN_ANY_PROCESS,
41 SCAN_VMA_NULL,
42 SCAN_VMA_CHECK,
43 SCAN_ADDRESS_RANGE,
44 SCAN_SWAP_CACHE_PAGE,
45 SCAN_DEL_PAGE_LRU,
46 SCAN_ALLOC_HUGE_PAGE_FAIL,
47 SCAN_CGROUP_CHARGE_FAIL,
48 SCAN_EXCEED_SWAP_PTE,
49 SCAN_TRUNCATED,
50 };
51
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/huge_memory.h>
54
55 /* default scan 8*512 ptes (or vmas) every 10 seconds */
56 static unsigned int khugepaged_pages_to_scan __read_mostly;
57 static unsigned int khugepaged_pages_collapsed;
58 static unsigned int khugepaged_full_scans;
59 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
60 /* during fragmentation poll the hugepage allocator once every minute */
61 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
62 static unsigned long khugepaged_sleep_expire;
63 static DEFINE_SPINLOCK(khugepaged_mm_lock);
64 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
65 /*
66 * By default collapse hugepages whenever at least one pte is mapped,
67 * as would have happened had the vma been large enough at page
68 * fault time.
69 */
70 static unsigned int khugepaged_max_ptes_none __read_mostly;
71 static unsigned int khugepaged_max_ptes_swap __read_mostly;
72
73 #define MM_SLOTS_HASH_BITS 10
74 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
75
76 static struct kmem_cache *mm_slot_cache __read_mostly;
77
78 /**
79 * struct mm_slot - hash lookup from mm to mm_slot
80 * @hash: hash collision list
81 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
82 * @mm: the mm that this information is valid for
83 */
84 struct mm_slot {
85 struct hlist_node hash;
86 struct list_head mm_node;
87 struct mm_struct *mm;
88 };
89
90 /**
91 * struct khugepaged_scan - cursor for scanning
92 * @mm_head: the head of the mm list to scan
93 * @mm_slot: the current mm_slot we are scanning
94 * @address: the next address inside that to be scanned
95 *
96 * There is only the one khugepaged_scan instance of this cursor structure.
97 */
98 struct khugepaged_scan {
99 struct list_head mm_head;
100 struct mm_slot *mm_slot;
101 unsigned long address;
102 };
103
104 static struct khugepaged_scan khugepaged_scan = {
105 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
106 };
107
108 #ifdef CONFIG_SYSFS
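/*
 * The attributes below are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ via the
 * khugepaged_attr_group defined at the end of this block.
 */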
109 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
110 struct kobj_attribute *attr,
111 char *buf)
112 {
113 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
114 }
115
116 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
117 struct kobj_attribute *attr,
118 const char *buf, size_t count)
119 {
120 unsigned long msecs;
121 int err;
122
123 err = kstrtoul(buf, 10, &msecs);
124 if (err || msecs > UINT_MAX)
125 return -EINVAL;
126
127 khugepaged_scan_sleep_millisecs = msecs;
128 khugepaged_sleep_expire = 0;
129 wake_up_interruptible(&khugepaged_wait);
130
131 return count;
132 }
133 static struct kobj_attribute scan_sleep_millisecs_attr =
134 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
135 scan_sleep_millisecs_store);
136
137 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
138 struct kobj_attribute *attr,
139 char *buf)
140 {
141 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
142 }
143
144 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
145 struct kobj_attribute *attr,
146 const char *buf, size_t count)
147 {
148 unsigned long msecs;
149 int err;
150
151 err = kstrtoul(buf, 10, &msecs);
152 if (err || msecs > UINT_MAX)
153 return -EINVAL;
154
155 khugepaged_alloc_sleep_millisecs = msecs;
156 khugepaged_sleep_expire = 0;
157 wake_up_interruptible(&khugepaged_wait);
158
159 return count;
160 }
161 static struct kobj_attribute alloc_sleep_millisecs_attr =
162 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
163 alloc_sleep_millisecs_store);
164
165 static ssize_t pages_to_scan_show(struct kobject *kobj,
166 struct kobj_attribute *attr,
167 char *buf)
168 {
169 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
170 }
171 static ssize_t pages_to_scan_store(struct kobject *kobj,
172 struct kobj_attribute *attr,
173 const char *buf, size_t count)
174 {
175 int err;
176 unsigned long pages;
177
178 err = kstrtoul(buf, 10, &pages);
179 if (err || !pages || pages > UINT_MAX)
180 return -EINVAL;
181
182 khugepaged_pages_to_scan = pages;
183
184 return count;
185 }
186 static struct kobj_attribute pages_to_scan_attr =
187 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
188 pages_to_scan_store);
189
190 static ssize_t pages_collapsed_show(struct kobject *kobj,
191 struct kobj_attribute *attr,
192 char *buf)
193 {
194 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
195 }
196 static struct kobj_attribute pages_collapsed_attr =
197 __ATTR_RO(pages_collapsed);
198
199 static ssize_t full_scans_show(struct kobject *kobj,
200 struct kobj_attribute *attr,
201 char *buf)
202 {
203 return sprintf(buf, "%u\n", khugepaged_full_scans);
204 }
205 static struct kobj_attribute full_scans_attr =
206 __ATTR_RO(full_scans);
207
208 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
209 struct kobj_attribute *attr, char *buf)
210 {
211 return single_hugepage_flag_show(kobj, attr, buf,
212 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
213 }
214 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
215 struct kobj_attribute *attr,
216 const char *buf, size_t count)
217 {
218 return single_hugepage_flag_store(kobj, attr, buf, count,
219 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
220 }
221 static struct kobj_attribute khugepaged_defrag_attr =
222 __ATTR(defrag, 0644, khugepaged_defrag_show,
223 khugepaged_defrag_store);
224
225 /*
226 * max_ptes_none controls whether khugepaged may collapse hugepages over
227 * unmapped ptes, which can in turn increase the memory footprint of
228 * the affected vmas. When max_ptes_none is 0 khugepaged will not
229 * reduce the available free memory in the system as it runs.
230 * Increasing max_ptes_none will instead potentially reduce the
231 * free memory in the system during the khugepaged scan.
232 */
233 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
234 struct kobj_attribute *attr,
235 char *buf)
236 {
237 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
238 }
239 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
240 struct kobj_attribute *attr,
241 const char *buf, size_t count)
242 {
243 int err;
244 unsigned long max_ptes_none;
245
246 err = kstrtoul(buf, 10, &max_ptes_none);
247 if (err || max_ptes_none > HPAGE_PMD_NR-1)
248 return -EINVAL;
249
250 khugepaged_max_ptes_none = max_ptes_none;
251
252 return count;
253 }
254 static struct kobj_attribute khugepaged_max_ptes_none_attr =
255 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
256 khugepaged_max_ptes_none_store);
257
258 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
259 struct kobj_attribute *attr,
260 char *buf)
261 {
262 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
263 }
264
265 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
266 struct kobj_attribute *attr,
267 const char *buf, size_t count)
268 {
269 int err;
270 unsigned long max_ptes_swap;
271
272 err = kstrtoul(buf, 10, &max_ptes_swap);
273 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
274 return -EINVAL;
275
276 khugepaged_max_ptes_swap = max_ptes_swap;
277
278 return count;
279 }
280
281 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
282 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
283 khugepaged_max_ptes_swap_store);
284
285 static struct attribute *khugepaged_attr[] = {
286 &khugepaged_defrag_attr.attr,
287 &khugepaged_max_ptes_none_attr.attr,
288 &pages_to_scan_attr.attr,
289 &pages_collapsed_attr.attr,
290 &full_scans_attr.attr,
291 &scan_sleep_millisecs_attr.attr,
292 &alloc_sleep_millisecs_attr.attr,
293 &khugepaged_max_ptes_swap_attr.attr,
294 NULL,
295 };
296
297 struct attribute_group khugepaged_attr_group = {
298 .attrs = khugepaged_attr,
299 .name = "khugepaged",
300 };
301 #endif /* CONFIG_SYSFS */
302
303 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
304
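/*
 * Handle MADV_HUGEPAGE / MADV_NOHUGEPAGE from madvise(): update the
 * vma flags and, for MADV_HUGEPAGE, register the mm with khugepaged
 * right away instead of waiting for a page fault.
 */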
305 int hugepage_madvise(struct vm_area_struct *vma,
306 unsigned long *vm_flags, int advice)
307 {
308 switch (advice) {
309 case MADV_HUGEPAGE:
310 #ifdef CONFIG_S390
311 /*
312 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
313 * can't handle this properly after s390_enable_sie, so we simply
314 * ignore the madvise to prevent qemu from causing a SIGSEGV.
315 */
316 if (mm_has_pgste(vma->vm_mm))
317 return 0;
318 #endif
319 *vm_flags &= ~VM_NOHUGEPAGE;
320 *vm_flags |= VM_HUGEPAGE;
321 /*
322 * If the vma becomes suitable for khugepaged to scan,
323 * register it here without waiting for a page fault that
324 * may not happen any time soon.
325 */
326 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
327 khugepaged_enter_vma_merge(vma, *vm_flags))
328 return -ENOMEM;
329 break;
330 case MADV_NOHUGEPAGE:
331 *vm_flags &= ~VM_HUGEPAGE;
332 *vm_flags |= VM_NOHUGEPAGE;
333 /*
334 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
335 * this vma, even if the mm stays registered in khugepaged
336 * because it got registered before VM_NOHUGEPAGE was set.
337 */
338 break;
339 }
340
341 return 0;
342 }
343
344 int __init khugepaged_init(void)
345 {
346 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
347 sizeof(struct mm_slot),
348 __alignof__(struct mm_slot), 0, NULL);
349 if (!mm_slot_cache)
350 return -ENOMEM;
351
352 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
353 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
354 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
355
356 return 0;
357 }
358
359 void __init khugepaged_destroy(void)
360 {
361 kmem_cache_destroy(mm_slot_cache);
362 }
363
364 static inline struct mm_slot *alloc_mm_slot(void)
365 {
366 if (!mm_slot_cache) /* initialization failed */
367 return NULL;
368 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
369 }
370
371 static inline void free_mm_slot(struct mm_slot *mm_slot)
372 {
373 kmem_cache_free(mm_slot_cache, mm_slot);
374 }
375
376 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
377 {
378 struct mm_slot *mm_slot;
379
380 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
381 if (mm == mm_slot->mm)
382 return mm_slot;
383
384 return NULL;
385 }
386
387 static void insert_to_mm_slots_hash(struct mm_struct *mm,
388 struct mm_slot *mm_slot)
389 {
390 mm_slot->mm = mm;
391 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
392 }
393
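/*
 * An mm stops being a collapse candidate once its last user is gone.
 * khugepaged only pins the mm_struct itself (mmgrab/mmdrop), so
 * mm_users == 0 means the address space is being torn down.
 */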
394 static inline int khugepaged_test_exit(struct mm_struct *mm)
395 {
396 return atomic_read(&mm->mm_users) == 0;
397 }
398
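/*
 * Register an mm with khugepaged: allocate an mm_slot, add it to the
 * hash and to the tail of the scan list, and wake the daemon if the
 * list was previously empty. MMF_VM_HUGEPAGE guards against double
 * registration.
 */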
399 int __khugepaged_enter(struct mm_struct *mm)
400 {
401 struct mm_slot *mm_slot;
402 int wakeup;
403
404 mm_slot = alloc_mm_slot();
405 if (!mm_slot)
406 return -ENOMEM;
407
408 /* __khugepaged_exit() must not run from under us */
409 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
410 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
411 free_mm_slot(mm_slot);
412 return 0;
413 }
414
415 spin_lock(&khugepaged_mm_lock);
416 insert_to_mm_slots_hash(mm, mm_slot);
417 /*
418 * Insert just behind the scanning cursor, to let the area settle
419 * down a little.
420 */
421 wakeup = list_empty(&khugepaged_scan.mm_head);
422 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
423 spin_unlock(&khugepaged_mm_lock);
424
425 mmgrab(mm);
426 if (wakeup)
427 wake_up_interruptible(&khugepaged_wait);
428
429 return 0;
430 }
431
432 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
433 unsigned long vm_flags)
434 {
435 unsigned long hstart, hend;
436 if (!vma->anon_vma)
437 /*
438 * Not yet faulted in so we will register later in the
439 * page fault if needed.
440 */
441 return 0;
442 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
443 /* khugepaged not yet working on file or special mappings */
444 return 0;
445 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
446 hend = vma->vm_end & HPAGE_PMD_MASK;
447 if (hstart < hend)
448 return khugepaged_enter(vma, vm_flags);
449 return 0;
450 }
451
452 void __khugepaged_exit(struct mm_struct *mm)
453 {
454 struct mm_slot *mm_slot;
455 int free = 0;
456
457 spin_lock(&khugepaged_mm_lock);
458 mm_slot = get_mm_slot(mm);
459 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
460 hash_del(&mm_slot->hash);
461 list_del(&mm_slot->mm_node);
462 free = 1;
463 }
464 spin_unlock(&khugepaged_mm_lock);
465
466 if (free) {
467 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
468 free_mm_slot(mm_slot);
469 mmdrop(mm);
470 } else if (mm_slot) {
471 /*
472 * This is required to serialize against
473 * khugepaged_test_exit() (which is guaranteed to run
474 * under mmap_sem read mode). Stop here (all pagetables
475 * will be destroyed after we return) until
476 * khugepaged has finished working on the pagetables
477 * under the mmap_sem.
478 */
479 down_write(&mm->mmap_sem);
480 up_write(&mm->mmap_sem);
481 }
482 }
483
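/*
 * Undo the isolation done by __collapse_huge_page_isolate(): drop the
 * NR_ISOLATED accounting, unlock the page and put it back on the LRU.
 */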
484 static void release_pte_page(struct page *page)
485 {
486 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
487 unlock_page(page);
488 putback_lru_page(page);
489 }
490
491 static void release_pte_pages(pte_t *pte, pte_t *_pte)
492 {
493 while (--_pte >= pte) {
494 pte_t pteval = *_pte;
495 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
496 release_pte_page(pte_page(pteval));
497 }
498 }
499
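/*
 * Lock and isolate every page mapped by the pte range about to be
 * collapsed. Each pte must be present (or an allowed none/zero pte),
 * map an anonymous, non-compound LRU page referenced only by this
 * process (plus the swap cache); at least one pte must be writable and
 * at least one page young. Returns 1 if all pages were isolated,
 * 0 otherwise (with any already-isolated pages released).
 */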
500 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
501 unsigned long address,
502 pte_t *pte)
503 {
504 struct page *page = NULL;
505 pte_t *_pte;
506 int none_or_zero = 0, result = 0, referenced = 0;
507 bool writable = false;
508
509 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
510 _pte++, address += PAGE_SIZE) {
511 pte_t pteval = *_pte;
512 if (pte_none(pteval) || (pte_present(pteval) &&
513 is_zero_pfn(pte_pfn(pteval)))) {
514 if (!userfaultfd_armed(vma) &&
515 ++none_or_zero <= khugepaged_max_ptes_none) {
516 continue;
517 } else {
518 result = SCAN_EXCEED_NONE_PTE;
519 goto out;
520 }
521 }
522 if (!pte_present(pteval)) {
523 result = SCAN_PTE_NON_PRESENT;
524 goto out;
525 }
526 page = vm_normal_page(vma, address, pteval);
527 if (unlikely(!page)) {
528 result = SCAN_PAGE_NULL;
529 goto out;
530 }
531
532 VM_BUG_ON_PAGE(PageCompound(page), page);
533 VM_BUG_ON_PAGE(!PageAnon(page), page);
534
535 /*
536 * We can do it before isolate_lru_page because the
537 * page can't be freed from under us. NOTE: PG_lock
538 * is needed to serialize against split_huge_page
539 * when invoked from the VM.
540 */
541 if (!trylock_page(page)) {
542 result = SCAN_PAGE_LOCK;
543 goto out;
544 }
545
546 /*
547 * cannot use mapcount: can't collapse if there's a gup pin.
548 * The page must only be referenced by the scanned process
549 * and page swap cache.
550 */
551 if (page_count(page) != 1 + PageSwapCache(page)) {
552 unlock_page(page);
553 result = SCAN_PAGE_COUNT;
554 goto out;
555 }
556 if (pte_write(pteval)) {
557 writable = true;
558 } else {
559 if (PageSwapCache(page) &&
560 !reuse_swap_page(page, NULL)) {
561 unlock_page(page);
562 result = SCAN_SWAP_CACHE_PAGE;
563 goto out;
564 }
565 /*
566 * Page is not in the swap cache. It can be collapsed
567 * into a THP.
568 */
569 }
570
571 /*
572 * Isolate the page to avoid collapsing a hugepage
573 * currently in use by the VM.
574 */
575 if (isolate_lru_page(page)) {
576 unlock_page(page);
577 result = SCAN_DEL_PAGE_LRU;
578 goto out;
579 }
580 inc_node_page_state(page,
581 NR_ISOLATED_ANON + page_is_file_cache(page));
582 VM_BUG_ON_PAGE(!PageLocked(page), page);
583 VM_BUG_ON_PAGE(PageLRU(page), page);
584
585 /* There should be enough young ptes to collapse the page */
586 if (pte_young(pteval) ||
587 page_is_young(page) || PageReferenced(page) ||
588 mmu_notifier_test_young(vma->vm_mm, address))
589 referenced++;
590 }
591 if (likely(writable)) {
592 if (likely(referenced)) {
593 result = SCAN_SUCCEED;
594 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
595 referenced, writable, result);
596 return 1;
597 }
598 } else {
599 result = SCAN_PAGE_RO;
600 }
601
602 out:
603 release_pte_pages(pte, _pte);
604 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
605 referenced, writable, result);
606 return 0;
607 }
608
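/*
 * Copy the content of the isolated small pages into the new huge page:
 * none/zero ptes become cleared subpages; for real pages the pte is
 * cleared, the rmap reference dropped and the old page freed (together
 * with its swap cache entry).
 */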
609 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
610 struct vm_area_struct *vma,
611 unsigned long address,
612 spinlock_t *ptl)
613 {
614 pte_t *_pte;
615 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
616 _pte++, page++, address += PAGE_SIZE) {
617 pte_t pteval = *_pte;
618 struct page *src_page;
619
620 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
621 clear_user_highpage(page, address);
622 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
623 if (is_zero_pfn(pte_pfn(pteval))) {
624 /*
625 * ptl mostly unnecessary.
626 */
627 spin_lock(ptl);
628 /*
629 * paravirt calls inside pte_clear here are
630 * superfluous.
631 */
632 pte_clear(vma->vm_mm, address, _pte);
633 spin_unlock(ptl);
634 }
635 } else {
636 src_page = pte_page(pteval);
637 copy_user_highpage(page, src_page, address, vma);
638 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
639 release_pte_page(src_page);
640 /*
641 * ptl mostly unnecessary, but preempt has to
642 * be disabled to update the per-cpu stats
643 * inside page_remove_rmap().
644 */
645 spin_lock(ptl);
646 /*
647 * paravirt calls inside pte_clear here are
648 * superfluous.
649 */
650 pte_clear(vma->vm_mm, address, _pte);
651 page_remove_rmap(src_page, false);
652 spin_unlock(ptl);
653 free_page_and_swap_cache(src_page);
654 }
655 }
656 }
657
658 static void khugepaged_alloc_sleep(void)
659 {
660 DEFINE_WAIT(wait);
661
662 add_wait_queue(&khugepaged_wait, &wait);
663 freezable_schedule_timeout_interruptible(
664 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
665 remove_wait_queue(&khugepaged_wait, &wait);
666 }
667
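/*
 * Per-scan tally of which nodes the to-be-collapsed small pages
 * currently live on; used by khugepaged_scan_abort() to avoid remote
 * collapses when node_reclaim_mode is set, and by
 * khugepaged_find_target_node() to pick the allocation node.
 */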
668 static int khugepaged_node_load[MAX_NUMNODES];
669
670 static bool khugepaged_scan_abort(int nid)
671 {
672 int i;
673
674 /*
675 * If node_reclaim_mode is disabled, then no extra effort is made to
676 * allocate memory locally.
677 */
678 if (!node_reclaim_mode)
679 return false;
680
681 /* If there is a count for this node already, it must be acceptable */
682 if (khugepaged_node_load[nid])
683 return false;
684
685 for (i = 0; i < MAX_NUMNODES; i++) {
686 if (!khugepaged_node_load[i])
687 continue;
688 if (node_distance(nid, i) > RECLAIM_DISTANCE)
689 return true;
690 }
691 return false;
692 }
693
694 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
695 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
696 {
697 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
698 }
699
700 #ifdef CONFIG_NUMA
701 static int khugepaged_find_target_node(void)
702 {
703 static int last_khugepaged_target_node = NUMA_NO_NODE;
704 int nid, target_node = 0, max_value = 0;
705
706 /* find first node with max normal pages hit */
707 for (nid = 0; nid < MAX_NUMNODES; nid++)
708 if (khugepaged_node_load[nid] > max_value) {
709 max_value = khugepaged_node_load[nid];
710 target_node = nid;
711 }
712
713 /* do some balancing if several nodes have the same hit record */
714 if (target_node <= last_khugepaged_target_node)
715 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
716 nid++)
717 if (max_value == khugepaged_node_load[nid]) {
718 target_node = nid;
719 break;
720 }
721
722 last_khugepaged_target_node = target_node;
723 return target_node;
724 }
725
726 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
727 {
728 if (IS_ERR(*hpage)) {
729 if (!*wait)
730 return false;
731
732 *wait = false;
733 *hpage = NULL;
734 khugepaged_alloc_sleep();
735 } else if (*hpage) {
736 put_page(*hpage);
737 *hpage = NULL;
738 }
739
740 return true;
741 }
742
743 static struct page *
744 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
745 {
746 VM_BUG_ON_PAGE(*hpage, *hpage);
747
748 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
749 if (unlikely(!*hpage)) {
750 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
751 *hpage = ERR_PTR(-ENOMEM);
752 return NULL;
753 }
754
755 prep_transhuge_page(*hpage);
756 count_vm_event(THP_COLLAPSE_ALLOC);
757 return *hpage;
758 }
759 #else
760 static int khugepaged_find_target_node(void)
761 {
762 return 0;
763 }
764
765 static inline struct page *alloc_khugepaged_hugepage(void)
766 {
767 struct page *page;
768
769 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
770 HPAGE_PMD_ORDER);
771 if (page)
772 prep_transhuge_page(page);
773 return page;
774 }
775
776 static struct page *khugepaged_alloc_hugepage(bool *wait)
777 {
778 struct page *hpage;
779
780 do {
781 hpage = alloc_khugepaged_hugepage();
782 if (!hpage) {
783 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
784 if (!*wait)
785 return NULL;
786
787 *wait = false;
788 khugepaged_alloc_sleep();
789 } else
790 count_vm_event(THP_COLLAPSE_ALLOC);
791 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
792
793 return hpage;
794 }
795
796 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
797 {
798 if (!*hpage)
799 *hpage = khugepaged_alloc_hugepage(wait);
800
801 if (unlikely(!*hpage))
802 return false;
803
804 return true;
805 }
806
807 static struct page *
808 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
809 {
810 VM_BUG_ON(!*hpage);
811
812 return *hpage;
813 }
814 #endif
815
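/*
 * Is this vma eligible for collapse? Respects VM_HUGEPAGE/VM_NOHUGEPAGE
 * and the global "always" mode; shmem mappings qualify only when
 * CONFIG_TRANSPARENT_HUGE_PAGECACHE is enabled and the mapping is
 * hugepage aligned; anything else must be anonymous (anon_vma already
 * set up, no vm_ops), not a temporary stack and not VM_NO_KHUGEPAGED.
 */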
816 static bool hugepage_vma_check(struct vm_area_struct *vma)
817 {
818 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
819 (vma->vm_flags & VM_NOHUGEPAGE))
820 return false;
821 if (shmem_file(vma->vm_file)) {
822 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
823 return false;
824 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
825 HPAGE_PMD_NR);
826 }
827 if (!vma->anon_vma || vma->vm_ops)
828 return false;
829 if (is_vma_temporary_stack(vma))
830 return false;
831 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
832 }
833
834 /*
835 * If mmap_sem was temporarily dropped, revalidate the vma
836 * after re-taking mmap_sem.
837 * Return 0 on success, otherwise return a non-zero
838 * value (scan code).
839 */
840
841 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
842 struct vm_area_struct **vmap)
843 {
844 struct vm_area_struct *vma;
845 unsigned long hstart, hend;
846
847 if (unlikely(khugepaged_test_exit(mm)))
848 return SCAN_ANY_PROCESS;
849
850 *vmap = vma = find_vma(mm, address);
851 if (!vma)
852 return SCAN_VMA_NULL;
853
854 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
855 hend = vma->vm_end & HPAGE_PMD_MASK;
856 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
857 return SCAN_ADDRESS_RANGE;
858 if (!hugepage_vma_check(vma))
859 return SCAN_VMA_CHECK;
860 return 0;
861 }
862
863 /*
864 * Bring missing pages in from swap, to complete THP collapse.
865 * Only done if khugepaged_scan_pmd believes it is worthwhile.
866 *
867 * Called and returns without pte mapped or spinlocks held,
868 * but with mmap_sem held to protect against vma changes.
869 */
870
871 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
872 struct vm_area_struct *vma,
873 unsigned long address, pmd_t *pmd,
874 int referenced)
875 {
876 int swapped_in = 0, ret = 0;
877 struct vm_fault vmf = {
878 .vma = vma,
879 .address = address,
880 .flags = FAULT_FLAG_ALLOW_RETRY,
881 .pmd = pmd,
882 .pgoff = linear_page_index(vma, address),
883 };
884
885 /* we only decide to swap in if there are enough young ptes */
886 if (referenced < HPAGE_PMD_NR/2) {
887 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
888 return false;
889 }
890 vmf.pte = pte_offset_map(pmd, address);
891 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
892 vmf.pte++, vmf.address += PAGE_SIZE) {
893 vmf.orig_pte = *vmf.pte;
894 if (!is_swap_pte(vmf.orig_pte))
895 continue;
896 swapped_in++;
897 ret = do_swap_page(&vmf);
898
899 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
900 if (ret & VM_FAULT_RETRY) {
901 down_read(&mm->mmap_sem);
902 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
903 /* vma is no longer available, don't continue to swapin */
904 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
905 return false;
906 }
907 /* check if the pmd is still valid */
908 if (mm_find_pmd(mm, address) != pmd) {
909 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
910 return false;
911 }
912 }
913 if (ret & VM_FAULT_ERROR) {
914 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
915 return false;
916 }
917 /* pte is unmapped now, we need to map it */
918 vmf.pte = pte_offset_map(pmd, vmf.address);
919 }
920 vmf.pte--;
921 pte_unmap(vmf.pte);
922 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
923 return true;
924 }
925
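/*
 * Collapse a PMD-sized anonymous range into one huge page:
 * - drop mmap_sem and allocate/charge the huge page;
 * - re-take mmap_sem for read, revalidate the vma and swap in any
 *   missing ptes;
 * - re-take mmap_sem for write, clear the pmd under the anon_vma lock,
 *   isolate and copy the small pages, then install the huge pmd.
 * Called with mmap_sem held for read; always returns with it released.
 */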
926 static void collapse_huge_page(struct mm_struct *mm,
927 unsigned long address,
928 struct page **hpage,
929 int node, int referenced)
930 {
931 pmd_t *pmd, _pmd;
932 pte_t *pte;
933 pgtable_t pgtable;
934 struct page *new_page;
935 spinlock_t *pmd_ptl, *pte_ptl;
936 int isolated = 0, result = 0;
937 struct mem_cgroup *memcg;
938 struct vm_area_struct *vma;
939 unsigned long mmun_start; /* For mmu_notifiers */
940 unsigned long mmun_end; /* For mmu_notifiers */
941 gfp_t gfp;
942
943 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
944
945 /* Only allocate from the target node */
946 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
947
948 /*
949 * Before allocating the hugepage, release the mmap_sem read lock.
950 * The allocation can take potentially a long time if it involves
951 * sync compaction, and we do not need to hold the mmap_sem during
952 * that. We will recheck the vma after taking it again in write mode.
953 */
954 up_read(&mm->mmap_sem);
955 new_page = khugepaged_alloc_page(hpage, gfp, node);
956 if (!new_page) {
957 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
958 goto out_nolock;
959 }
960
961 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
962 result = SCAN_CGROUP_CHARGE_FAIL;
963 goto out_nolock;
964 }
965
966 down_read(&mm->mmap_sem);
967 result = hugepage_vma_revalidate(mm, address, &vma);
968 if (result) {
969 mem_cgroup_cancel_charge(new_page, memcg, true);
970 up_read(&mm->mmap_sem);
971 goto out_nolock;
972 }
973
974 pmd = mm_find_pmd(mm, address);
975 if (!pmd) {
976 result = SCAN_PMD_NULL;
977 mem_cgroup_cancel_charge(new_page, memcg, true);
978 up_read(&mm->mmap_sem);
979 goto out_nolock;
980 }
981
982 /*
983 * __collapse_huge_page_swapin always returns with mmap_sem locked.
984 * If it fails, we release mmap_sem and jump out_nolock.
985 * Continuing to collapse causes inconsistency.
986 */
987 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
988 mem_cgroup_cancel_charge(new_page, memcg, true);
989 up_read(&mm->mmap_sem);
990 goto out_nolock;
991 }
992
993 up_read(&mm->mmap_sem);
994 /*
995 * Prevent all access to the pagetables, with the exception of
996 * gup_fast (handled later by the ptep_clear_flush) and the VM
997 * (handled by the anon_vma lock + PG_lock).
998 */
999 down_write(&mm->mmap_sem);
1000 result = hugepage_vma_revalidate(mm, address, &vma);
1001 if (result)
1002 goto out;
1003 /* check if the pmd is still valid */
1004 if (mm_find_pmd(mm, address) != pmd)
1005 goto out;
1006
1007 anon_vma_lock_write(vma->anon_vma);
1008
1009 pte = pte_offset_map(pmd, address);
1010 pte_ptl = pte_lockptr(mm, pmd);
1011
1012 mmun_start = address;
1013 mmun_end = address + HPAGE_PMD_SIZE;
1014 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1015 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1016 /*
1017 * After this gup_fast can't run anymore. This also removes
1018 * any huge TLB entry from the CPU so we won't allow
1019 * huge and small TLB entries for the same virtual address
1020 * to avoid the risk of CPU bugs in that area.
1021 */
1022 _pmd = pmdp_collapse_flush(vma, address, pmd);
1023 spin_unlock(pmd_ptl);
1024 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1025
1026 spin_lock(pte_ptl);
1027 isolated = __collapse_huge_page_isolate(vma, address, pte);
1028 spin_unlock(pte_ptl);
1029
1030 if (unlikely(!isolated)) {
1031 pte_unmap(pte);
1032 spin_lock(pmd_ptl);
1033 BUG_ON(!pmd_none(*pmd));
1034 /*
1035 * We can only use set_pmd_at when establishing
1036 * hugepmds and never for establishing regular pmds that
1037 * point to regular pagetables. Use pmd_populate for that.
1038 */
1039 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1040 spin_unlock(pmd_ptl);
1041 anon_vma_unlock_write(vma->anon_vma);
1042 result = SCAN_FAIL;
1043 goto out;
1044 }
1045
1046 /*
1047 * All pages are isolated and locked so anon_vma rmap
1048 * can't run anymore.
1049 */
1050 anon_vma_unlock_write(vma->anon_vma);
1051
1052 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1053 pte_unmap(pte);
1054 __SetPageUptodate(new_page);
1055 pgtable = pmd_pgtable(_pmd);
1056
1057 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1058 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1059
1060 /*
1061 * spin_lock() below is not the equivalent of smp_wmb(), so
1062 * this is needed to avoid the copy_huge_page writes becoming
1063 * visible after the set_pmd_at() write.
1064 */
1065 smp_wmb();
1066
1067 spin_lock(pmd_ptl);
1068 BUG_ON(!pmd_none(*pmd));
1069 page_add_new_anon_rmap(new_page, vma, address, true);
1070 mem_cgroup_commit_charge(new_page, memcg, false, true);
1071 lru_cache_add_active_or_unevictable(new_page, vma);
1072 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1073 set_pmd_at(mm, address, pmd, _pmd);
1074 update_mmu_cache_pmd(vma, address, pmd);
1075 spin_unlock(pmd_ptl);
1076
1077 *hpage = NULL;
1078
1079 khugepaged_pages_collapsed++;
1080 result = SCAN_SUCCEED;
1081 out_up_write:
1082 up_write(&mm->mmap_sem);
1083 out_nolock:
1084 trace_mm_collapse_huge_page(mm, isolated, result);
1085 return;
1086 out:
1087 mem_cgroup_cancel_charge(new_page, memcg, true);
1088 goto out_up_write;
1089 }
1090
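/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * count swap, none/zero and unreferenced ptes against the tunables and
 * record the home node of every mapped page. On success call
 * collapse_huge_page() (which releases mmap_sem) and return 1;
 * otherwise return 0 with mmap_sem still held.
 */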
1091 static int khugepaged_scan_pmd(struct mm_struct *mm,
1092 struct vm_area_struct *vma,
1093 unsigned long address,
1094 struct page **hpage)
1095 {
1096 pmd_t *pmd;
1097 pte_t *pte, *_pte;
1098 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1099 struct page *page = NULL;
1100 unsigned long _address;
1101 spinlock_t *ptl;
1102 int node = NUMA_NO_NODE, unmapped = 0;
1103 bool writable = false;
1104
1105 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1106
1107 pmd = mm_find_pmd(mm, address);
1108 if (!pmd) {
1109 result = SCAN_PMD_NULL;
1110 goto out;
1111 }
1112
1113 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1114 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1115 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1116 _pte++, _address += PAGE_SIZE) {
1117 pte_t pteval = *_pte;
1118 if (is_swap_pte(pteval)) {
1119 if (++unmapped <= khugepaged_max_ptes_swap) {
1120 continue;
1121 } else {
1122 result = SCAN_EXCEED_SWAP_PTE;
1123 goto out_unmap;
1124 }
1125 }
1126 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1127 if (!userfaultfd_armed(vma) &&
1128 ++none_or_zero <= khugepaged_max_ptes_none) {
1129 continue;
1130 } else {
1131 result = SCAN_EXCEED_NONE_PTE;
1132 goto out_unmap;
1133 }
1134 }
1135 if (!pte_present(pteval)) {
1136 result = SCAN_PTE_NON_PRESENT;
1137 goto out_unmap;
1138 }
1139 if (pte_write(pteval))
1140 writable = true;
1141
1142 page = vm_normal_page(vma, _address, pteval);
1143 if (unlikely(!page)) {
1144 result = SCAN_PAGE_NULL;
1145 goto out_unmap;
1146 }
1147
1148 /* TODO: teach khugepaged to collapse THP mapped with pte */
1149 if (PageCompound(page)) {
1150 result = SCAN_PAGE_COMPOUND;
1151 goto out_unmap;
1152 }
1153
1154 /*
1155 * Record which node the original page is from and save this
1156 * information to khugepaged_node_load[].
1157 * Khugepaged will allocate a hugepage from the node with the
1158 * highest hit count.
1159 */
1160 node = page_to_nid(page);
1161 if (khugepaged_scan_abort(node)) {
1162 result = SCAN_SCAN_ABORT;
1163 goto out_unmap;
1164 }
1165 khugepaged_node_load[node]++;
1166 if (!PageLRU(page)) {
1167 result = SCAN_PAGE_LRU;
1168 goto out_unmap;
1169 }
1170 if (PageLocked(page)) {
1171 result = SCAN_PAGE_LOCK;
1172 goto out_unmap;
1173 }
1174 if (!PageAnon(page)) {
1175 result = SCAN_PAGE_ANON;
1176 goto out_unmap;
1177 }
1178
1179 /*
1180 * cannot use mapcount: can't collapse if there's a gup pin.
1181 * The page must only be referenced by the scanned process
1182 * and page swap cache.
1183 */
1184 if (page_count(page) != 1 + PageSwapCache(page)) {
1185 result = SCAN_PAGE_COUNT;
1186 goto out_unmap;
1187 }
1188 if (pte_young(pteval) ||
1189 page_is_young(page) || PageReferenced(page) ||
1190 mmu_notifier_test_young(vma->vm_mm, address))
1191 referenced++;
1192 }
1193 if (writable) {
1194 if (referenced) {
1195 result = SCAN_SUCCEED;
1196 ret = 1;
1197 } else {
1198 result = SCAN_LACK_REFERENCED_PAGE;
1199 }
1200 } else {
1201 result = SCAN_PAGE_RO;
1202 }
1203 out_unmap:
1204 pte_unmap_unlock(pte, ptl);
1205 if (ret) {
1206 node = khugepaged_find_target_node();
1207 /* collapse_huge_page will return with the mmap_sem released */
1208 collapse_huge_page(mm, address, hpage, node, referenced);
1209 }
1210 out:
1211 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1212 none_or_zero, result, unmapped);
1213 return ret;
1214 }
1215
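/*
 * Called with khugepaged_mm_lock held: if the mm has exited, unlink its
 * mm_slot from the hash and scan list, free it and drop the reference
 * taken in __khugepaged_enter().
 */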
1216 static void collect_mm_slot(struct mm_slot *mm_slot)
1217 {
1218 struct mm_struct *mm = mm_slot->mm;
1219
1220 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1221
1222 if (khugepaged_test_exit(mm)) {
1223 /* free mm_slot */
1224 hash_del(&mm_slot->hash);
1225 list_del(&mm_slot->mm_node);
1226
1227 /*
1228 * Not strictly needed because the mm exited already.
1229 *
1230 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1231 */
1232
1233 /* khugepaged_mm_lock actually not necessary for the below */
1234 free_mm_slot(mm_slot);
1235 mmdrop(mm);
1236 }
1237 }
1238
1239 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
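/*
 * After a shmem collapse the small pages have been unmapped, but the
 * now-empty pte page tables may still be in place. For every
 * non-anonymous vma covering the range, clear the pmd and free the pte
 * table (when the mmap_sem write trylock succeeds) so that the next
 * fault can map the range with a huge pmd.
 */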
1240 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1241 {
1242 struct vm_area_struct *vma;
1243 unsigned long addr;
1244 pmd_t *pmd, _pmd;
1245
1246 i_mmap_lock_write(mapping);
1247 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1248 /* probably overkill */
1249 if (vma->anon_vma)
1250 continue;
1251 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1252 if (addr & ~HPAGE_PMD_MASK)
1253 continue;
1254 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1255 continue;
1256 pmd = mm_find_pmd(vma->vm_mm, addr);
1257 if (!pmd)
1258 continue;
1259 /*
1260 * We need exclusive mmap_sem to retract page table.
1261 * If trylock fails we would end up with pte-mapped THP after
1262 * re-fault. Not ideal, but it's more important to not disturb
1263 * the system too much.
1264 */
1265 if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
1266 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
1267 /* assume page table is clear */
1268 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1269 spin_unlock(ptl);
1270 up_write(&vma->vm_mm->mmap_sem);
1271 atomic_long_dec(&vma->vm_mm->nr_ptes);
1272 pte_free(vma->vm_mm, pmd_pgtable(_pmd));
1273 }
1274 }
1275 i_mmap_unlock_write(mapping);
1276 }
1277
1278 /**
1279 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1280 *
1281 * The basic scheme is simple, the details are more complex:
1282 * - allocate and freeze a new huge page;
1283 * - scan over the radix tree, replacing old pages with the new one
1284 * + swap in pages if necessary;
1285 * + fill in gaps;
1286 * + keep old pages around in case rollback is required;
1287 * - if replacing succeeds:
1288 * + copy data over;
1289 * + free old pages;
1290 * + unfreeze the huge page;
1291 * - if replacing fails:
1292 * + put all pages back and unfreeze them;
1293 * + restore gaps in the radix-tree;
1294 * + free the huge page;
1295 */
1296 static void collapse_shmem(struct mm_struct *mm,
1297 struct address_space *mapping, pgoff_t start,
1298 struct page **hpage, int node)
1299 {
1300 gfp_t gfp;
1301 struct page *page, *new_page, *tmp;
1302 struct mem_cgroup *memcg;
1303 pgoff_t index, end = start + HPAGE_PMD_NR;
1304 LIST_HEAD(pagelist);
1305 struct radix_tree_iter iter;
1306 void **slot;
1307 int nr_none = 0, result = SCAN_SUCCEED;
1308
1309 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1310
1311 /* Only allocate from the target node */
1312 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1313
1314 new_page = khugepaged_alloc_page(hpage, gfp, node);
1315 if (!new_page) {
1316 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1317 goto out;
1318 }
1319
1320 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1321 result = SCAN_CGROUP_CHARGE_FAIL;
1322 goto out;
1323 }
1324
1325 new_page->index = start;
1326 new_page->mapping = mapping;
1327 __SetPageSwapBacked(new_page);
1328 __SetPageLocked(new_page);
1329 BUG_ON(!page_ref_freeze(new_page, 1));
1330
1331
1332 /*
1333 * At this point the new_page is 'frozen' (page_count() is zero), locked
1334 * and not up-to-date. It's safe to insert it into the radix tree, because
1335 * nobody would be able to map it or use it in any other way until we
1336 * unfreeze it.
1337 */
1338
1339 index = start;
1340 spin_lock_irq(&mapping->tree_lock);
1341 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1342 int n = min(iter.index, end) - index;
1343
1344 /*
1345 * Handle holes in the radix tree: charge them to shmem and
1346 * insert the relevant subpage of new_page into the radix-tree.
1347 */
1348 if (n && !shmem_charge(mapping->host, n)) {
1349 result = SCAN_FAIL;
1350 break;
1351 }
1352 nr_none += n;
1353 for (; index < min(iter.index, end); index++) {
1354 radix_tree_insert(&mapping->page_tree, index,
1355 new_page + (index % HPAGE_PMD_NR));
1356 }
1357
1358 /* We are done. */
1359 if (index >= end)
1360 break;
1361
1362 page = radix_tree_deref_slot_protected(slot,
1363 &mapping->tree_lock);
1364 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1365 spin_unlock_irq(&mapping->tree_lock);
1366 /* swap in or instantiate fallocated page */
1367 if (shmem_getpage(mapping->host, index, &page,
1368 SGP_NOHUGE)) {
1369 result = SCAN_FAIL;
1370 goto tree_unlocked;
1371 }
1372 spin_lock_irq(&mapping->tree_lock);
1373 } else if (trylock_page(page)) {
1374 get_page(page);
1375 } else {
1376 result = SCAN_PAGE_LOCK;
1377 break;
1378 }
1379
1380 /*
1381 * The page must be locked, so we can drop the tree_lock
1382 * without racing with truncate.
1383 */
1384 VM_BUG_ON_PAGE(!PageLocked(page), page);
1385 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1386 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1387
1388 if (page_mapping(page) != mapping) {
1389 result = SCAN_TRUNCATED;
1390 goto out_unlock;
1391 }
1392 spin_unlock_irq(&mapping->tree_lock);
1393
1394 if (isolate_lru_page(page)) {
1395 result = SCAN_DEL_PAGE_LRU;
1396 goto out_isolate_failed;
1397 }
1398
1399 if (page_mapped(page))
1400 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1401 PAGE_SIZE, 0);
1402
1403 spin_lock_irq(&mapping->tree_lock);
1404
1405 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1406 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1407 &mapping->tree_lock), page);
1408 VM_BUG_ON_PAGE(page_mapped(page), page);
1409
1410 /*
1411 * The page is expected to have page_count() == 3:
1412 * - we hold a pin on it;
1413 * - one reference from radix tree;
1414 * - one from isolate_lru_page;
1415 */
1416 if (!page_ref_freeze(page, 3)) {
1417 result = SCAN_PAGE_COUNT;
1418 goto out_lru;
1419 }
1420
1421 /*
1422 * Add the page to the list to be able to undo the collapse if
1423 * something goes wrong.
1424 */
1425 list_add_tail(&page->lru, &pagelist);
1426
1427 /* Finally, replace with the new page. */
1428 radix_tree_replace_slot(&mapping->page_tree, slot,
1429 new_page + (index % HPAGE_PMD_NR));
1430
1431 slot = radix_tree_iter_resume(slot, &iter);
1432 index++;
1433 continue;
1434 out_lru:
1435 spin_unlock_irq(&mapping->tree_lock);
1436 putback_lru_page(page);
1437 out_isolate_failed:
1438 unlock_page(page);
1439 put_page(page);
1440 goto tree_unlocked;
1441 out_unlock:
1442 unlock_page(page);
1443 put_page(page);
1444 break;
1445 }
1446
1447 /*
1448 * Handle a hole in the radix tree at the end of the range.
1449 * This code only triggers if there's nothing in the radix tree
1450 * beyond 'end'.
1451 */
1452 if (result == SCAN_SUCCEED && index < end) {
1453 int n = end - index;
1454
1455 if (!shmem_charge(mapping->host, n)) {
1456 result = SCAN_FAIL;
1457 goto tree_locked;
1458 }
1459
1460 for (; index < end; index++) {
1461 radix_tree_insert(&mapping->page_tree, index,
1462 new_page + (index % HPAGE_PMD_NR));
1463 }
1464 nr_none += n;
1465 }
1466
1467 tree_locked:
1468 spin_unlock_irq(&mapping->tree_lock);
1469 tree_unlocked:
1470
1471 if (result == SCAN_SUCCEED) {
1472 unsigned long flags;
1473 struct zone *zone = page_zone(new_page);
1474
1475 /*
1476 * Replacing old pages with the new one has succeeded, now we need
1477 * to copy the content and free the old pages.
1478 */
1479 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1480 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1481 page);
1482 list_del(&page->lru);
1483 unlock_page(page);
1484 page_ref_unfreeze(page, 1);
1485 page->mapping = NULL;
1486 ClearPageActive(page);
1487 ClearPageUnevictable(page);
1488 put_page(page);
1489 }
1490
1491 local_irq_save(flags);
1492 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1493 if (nr_none) {
1494 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1495 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1496 }
1497 local_irq_restore(flags);
1498
1499 /*
1500 * Remove pte page tables, so we can re-fault
1501 * the page as huge.
1502 */
1503 retract_page_tables(mapping, start);
1504
1505 /* Everything is ready, let's unfreeze the new_page */
1506 set_page_dirty(new_page);
1507 SetPageUptodate(new_page);
1508 page_ref_unfreeze(new_page, HPAGE_PMD_NR);
1509 mem_cgroup_commit_charge(new_page, memcg, false, true);
1510 lru_cache_add_anon(new_page);
1511 unlock_page(new_page);
1512
1513 *hpage = NULL;
1514 } else {
1515 /* Something went wrong: rollback changes to the radix-tree */
1516 shmem_uncharge(mapping->host, nr_none);
1517 spin_lock_irq(&mapping->tree_lock);
1518 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1519 start) {
1520 if (iter.index >= end)
1521 break;
1522 page = list_first_entry_or_null(&pagelist,
1523 struct page, lru);
1524 if (!page || iter.index < page->index) {
1525 if (!nr_none)
1526 break;
1527 nr_none--;
1528 /* Put holes back where they were */
1529 radix_tree_delete(&mapping->page_tree,
1530 iter.index);
1531 continue;
1532 }
1533
1534 VM_BUG_ON_PAGE(page->index != iter.index, page);
1535
1536 /* Unfreeze the page. */
1537 list_del(&page->lru);
1538 page_ref_unfreeze(page, 2);
1539 radix_tree_replace_slot(&mapping->page_tree,
1540 slot, page);
1541 slot = radix_tree_iter_resume(slot, &iter);
1542 spin_unlock_irq(&mapping->tree_lock);
1543 putback_lru_page(page);
1544 unlock_page(page);
1545 spin_lock_irq(&mapping->tree_lock);
1546 }
1547 VM_BUG_ON(nr_none);
1548 spin_unlock_irq(&mapping->tree_lock);
1549
1550 /* Unfreeze new_page; the caller will take care of freeing it */
1551 page_ref_unfreeze(new_page, 1);
1552 mem_cgroup_cancel_charge(new_page, memcg, true);
1553 unlock_page(new_page);
1554 new_page->mapping = NULL;
1555 }
1556 out:
1557 VM_BUG_ON(!list_empty(&pagelist));
1558 /* TODO: tracepoints */
1559 }
1560
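/*
 * Radix-tree scan of a shmem mapping: walk the HPAGE_PMD_NR window
 * starting at @start, counting present pages and swap entries, bailing
 * out if max_ptes_swap is exceeded, a compound page is found or
 * khugepaged_scan_abort() rejects the page's node. If enough pages are
 * present (missing ones within max_ptes_none), call collapse_shmem()
 * on the preferred node.
 */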
1561 static void khugepaged_scan_shmem(struct mm_struct *mm,
1562 struct address_space *mapping,
1563 pgoff_t start, struct page **hpage)
1564 {
1565 struct page *page = NULL;
1566 struct radix_tree_iter iter;
1567 void **slot;
1568 int present, swap;
1569 int node = NUMA_NO_NODE;
1570 int result = SCAN_SUCCEED;
1571
1572 present = 0;
1573 swap = 0;
1574 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1575 rcu_read_lock();
1576 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1577 if (iter.index >= start + HPAGE_PMD_NR)
1578 break;
1579
1580 page = radix_tree_deref_slot(slot);
1581 if (radix_tree_deref_retry(page)) {
1582 slot = radix_tree_iter_retry(&iter);
1583 continue;
1584 }
1585
1586 if (radix_tree_exception(page)) {
1587 if (++swap > khugepaged_max_ptes_swap) {
1588 result = SCAN_EXCEED_SWAP_PTE;
1589 break;
1590 }
1591 continue;
1592 }
1593
1594 if (PageTransCompound(page)) {
1595 result = SCAN_PAGE_COMPOUND;
1596 break;
1597 }
1598
1599 node = page_to_nid(page);
1600 if (khugepaged_scan_abort(node)) {
1601 result = SCAN_SCAN_ABORT;
1602 break;
1603 }
1604 khugepaged_node_load[node]++;
1605
1606 if (!PageLRU(page)) {
1607 result = SCAN_PAGE_LRU;
1608 break;
1609 }
1610
1611 if (page_count(page) != 1 + page_mapcount(page)) {
1612 result = SCAN_PAGE_COUNT;
1613 break;
1614 }
1615
1616 /*
1617 * We probably should check if the page is referenced here, but
1618 * nobody would transfer pte_young() to PageReferenced() for us.
1619 * And rmap walk here is just too costly...
1620 */
1621
1622 present++;
1623
1624 if (need_resched()) {
1625 slot = radix_tree_iter_resume(slot, &iter);
1626 cond_resched_rcu();
1627 }
1628 }
1629 rcu_read_unlock();
1630
1631 if (result == SCAN_SUCCEED) {
1632 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1633 result = SCAN_EXCEED_NONE_PTE;
1634 } else {
1635 node = khugepaged_find_target_node();
1636 collapse_shmem(mm, mapping, start, hpage, node);
1637 }
1638 }
1639
1640 /* TODO: tracepoints */
1641 }
1642 #else
1643 static void khugepaged_scan_shmem(struct mm_struct *mm,
1644 struct address_space *mapping,
1645 pgoff_t start, struct page **hpage)
1646 {
1647 BUILD_BUG();
1648 }
1649 #endif
1650
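/*
 * Scan up to @pages ptes starting at the saved cursor
 * (khugepaged_scan.mm_slot / .address), walking vmas and mms in order.
 * Called and returns with khugepaged_mm_lock held, but drops it (and
 * takes/drops mmap_sem) while scanning. Returns the amount of progress
 * made, measured in ptes/vmas scanned.
 */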
1651 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1652 struct page **hpage)
1653 __releases(&khugepaged_mm_lock)
1654 __acquires(&khugepaged_mm_lock)
1655 {
1656 struct mm_slot *mm_slot;
1657 struct mm_struct *mm;
1658 struct vm_area_struct *vma;
1659 int progress = 0;
1660
1661 VM_BUG_ON(!pages);
1662 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1663
1664 if (khugepaged_scan.mm_slot)
1665 mm_slot = khugepaged_scan.mm_slot;
1666 else {
1667 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1668 struct mm_slot, mm_node);
1669 khugepaged_scan.address = 0;
1670 khugepaged_scan.mm_slot = mm_slot;
1671 }
1672 spin_unlock(&khugepaged_mm_lock);
1673
1674 mm = mm_slot->mm;
1675 down_read(&mm->mmap_sem);
1676 if (unlikely(khugepaged_test_exit(mm)))
1677 vma = NULL;
1678 else
1679 vma = find_vma(mm, khugepaged_scan.address);
1680
1681 progress++;
1682 for (; vma; vma = vma->vm_next) {
1683 unsigned long hstart, hend;
1684
1685 cond_resched();
1686 if (unlikely(khugepaged_test_exit(mm))) {
1687 progress++;
1688 break;
1689 }
1690 if (!hugepage_vma_check(vma)) {
1691 skip:
1692 progress++;
1693 continue;
1694 }
1695 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1696 hend = vma->vm_end & HPAGE_PMD_MASK;
1697 if (hstart >= hend)
1698 goto skip;
1699 if (khugepaged_scan.address > hend)
1700 goto skip;
1701 if (khugepaged_scan.address < hstart)
1702 khugepaged_scan.address = hstart;
1703 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1704
1705 while (khugepaged_scan.address < hend) {
1706 int ret;
1707 cond_resched();
1708 if (unlikely(khugepaged_test_exit(mm)))
1709 goto breakouterloop;
1710
1711 VM_BUG_ON(khugepaged_scan.address < hstart ||
1712 khugepaged_scan.address + HPAGE_PMD_SIZE >
1713 hend);
1714 if (shmem_file(vma->vm_file)) {
1715 struct file *file;
1716 pgoff_t pgoff = linear_page_index(vma,
1717 khugepaged_scan.address);
1718 if (!shmem_huge_enabled(vma))
1719 goto skip;
1720 file = get_file(vma->vm_file);
1721 up_read(&mm->mmap_sem);
1722 ret = 1;
1723 khugepaged_scan_shmem(mm, file->f_mapping,
1724 pgoff, hpage);
1725 fput(file);
1726 } else {
1727 ret = khugepaged_scan_pmd(mm, vma,
1728 khugepaged_scan.address,
1729 hpage);
1730 }
1731 /* move to next address */
1732 khugepaged_scan.address += HPAGE_PMD_SIZE;
1733 progress += HPAGE_PMD_NR;
1734 if (ret)
1735 /* we released mmap_sem so break loop */
1736 goto breakouterloop_mmap_sem;
1737 if (progress >= pages)
1738 goto breakouterloop;
1739 }
1740 }
1741 breakouterloop:
1742 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1743 breakouterloop_mmap_sem:
1744
1745 spin_lock(&khugepaged_mm_lock);
1746 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1747 /*
1748 * Release the current mm_slot if this mm is about to die, or
1749 * if we scanned all vmas of this mm.
1750 */
1751 if (khugepaged_test_exit(mm) || !vma) {
1752 /*
1753 * Make sure that if mm_users is reaching zero while
1754 * khugepaged runs here, khugepaged_exit will find
1755 * mm_slot not pointing to the exiting mm.
1756 */
1757 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1758 khugepaged_scan.mm_slot = list_entry(
1759 mm_slot->mm_node.next,
1760 struct mm_slot, mm_node);
1761 khugepaged_scan.address = 0;
1762 } else {
1763 khugepaged_scan.mm_slot = NULL;
1764 khugepaged_full_scans++;
1765 }
1766
1767 collect_mm_slot(mm_slot);
1768 }
1769
1770 return progress;
1771 }
1772
1773 static int khugepaged_has_work(void)
1774 {
1775 return !list_empty(&khugepaged_scan.mm_head) &&
1776 khugepaged_enabled();
1777 }
1778
1779 static int khugepaged_wait_event(void)
1780 {
1781 return !list_empty(&khugepaged_scan.mm_head) ||
1782 kthread_should_stop();
1783 }
1784
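/*
 * One scan pass of up to khugepaged_pages_to_scan pages. Preallocates
 * the huge page where required (the !NUMA case) and bails out early on
 * freeze or kthread stop.
 */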
1785 static void khugepaged_do_scan(void)
1786 {
1787 struct page *hpage = NULL;
1788 unsigned int progress = 0, pass_through_head = 0;
1789 unsigned int pages = khugepaged_pages_to_scan;
1790 bool wait = true;
1791
1792 barrier(); /* write khugepaged_pages_to_scan to local stack */
1793
1794 while (progress < pages) {
1795 if (!khugepaged_prealloc_page(&hpage, &wait))
1796 break;
1797
1798 cond_resched();
1799
1800 if (unlikely(kthread_should_stop() || try_to_freeze()))
1801 break;
1802
1803 spin_lock(&khugepaged_mm_lock);
1804 if (!khugepaged_scan.mm_slot)
1805 pass_through_head++;
1806 if (khugepaged_has_work() &&
1807 pass_through_head < 2)
1808 progress += khugepaged_scan_mm_slot(pages - progress,
1809 &hpage);
1810 else
1811 progress = pages;
1812 spin_unlock(&khugepaged_mm_lock);
1813 }
1814
1815 if (!IS_ERR_OR_NULL(hpage))
1816 put_page(hpage);
1817 }
1818
1819 static bool khugepaged_should_wakeup(void)
1820 {
1821 return kthread_should_stop() ||
1822 time_after_eq(jiffies, khugepaged_sleep_expire);
1823 }
1824
1825 static void khugepaged_wait_work(void)
1826 {
1827 if (khugepaged_has_work()) {
1828 const unsigned long scan_sleep_jiffies =
1829 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1830
1831 if (!scan_sleep_jiffies)
1832 return;
1833
1834 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1835 wait_event_freezable_timeout(khugepaged_wait,
1836 khugepaged_should_wakeup(),
1837 scan_sleep_jiffies);
1838 return;
1839 }
1840
1841 if (khugepaged_enabled())
1842 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1843 }
1844
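/*
 * Main loop of the khugepaged kernel thread: alternate between a scan
 * pass and a (freezable) sleep until the thread is stopped, then
 * release any mm_slot the cursor still points at.
 */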
1845 static int khugepaged(void *none)
1846 {
1847 struct mm_slot *mm_slot;
1848
1849 set_freezable();
1850 set_user_nice(current, MAX_NICE);
1851
1852 while (!kthread_should_stop()) {
1853 khugepaged_do_scan();
1854 khugepaged_wait_work();
1855 }
1856
1857 spin_lock(&khugepaged_mm_lock);
1858 mm_slot = khugepaged_scan.mm_slot;
1859 khugepaged_scan.mm_slot = NULL;
1860 if (mm_slot)
1861 collect_mm_slot(mm_slot);
1862 spin_unlock(&khugepaged_mm_lock);
1863 return 0;
1864 }
1865
1866 static void set_recommended_min_free_kbytes(void)
1867 {
1868 struct zone *zone;
1869 int nr_zones = 0;
1870 unsigned long recommended_min;
1871
1872 for_each_populated_zone(zone)
1873 nr_zones++;
1874
1875 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1876 recommended_min = pageblock_nr_pages * nr_zones * 2;
1877
1878 /*
1879 * Make sure that on average at least two pageblocks are almost free
1880 * of another type, one for a migratetype to fall back to and a
1881 * second to avoid subsequent fallbacks of other types. There are 3
1882 * MIGRATE_TYPES we care about.
1883 */
1884 recommended_min += pageblock_nr_pages * nr_zones *
1885 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1886
1887 /* don't ever allow to reserve more than 5% of the lowmem */
1888 recommended_min = min(recommended_min,
1889 (unsigned long) nr_free_buffer_pages() / 20);
1890 recommended_min <<= (PAGE_SHIFT-10);
1891
1892 if (recommended_min > min_free_kbytes) {
1893 if (user_min_free_kbytes >= 0)
1894 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1895 min_free_kbytes, recommended_min);
1896
1897 min_free_kbytes = recommended_min;
1898 }
1899 setup_per_zone_wmarks();
1900 }
1901
1902 int start_stop_khugepaged(void)
1903 {
1904 static struct task_struct *khugepaged_thread __read_mostly;
1905 static DEFINE_MUTEX(khugepaged_mutex);
1906 int err = 0;
1907
1908 mutex_lock(&khugepaged_mutex);
1909 if (khugepaged_enabled()) {
1910 if (!khugepaged_thread)
1911 khugepaged_thread = kthread_run(khugepaged, NULL,
1912 "khugepaged");
1913 if (IS_ERR(khugepaged_thread)) {
1914 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1915 err = PTR_ERR(khugepaged_thread);
1916 khugepaged_thread = NULL;
1917 goto fail;
1918 }
1919
1920 if (!list_empty(&khugepaged_scan.mm_head))
1921 wake_up_interruptible(&khugepaged_wait);
1922
1923 set_recommended_min_free_kbytes();
1924 } else if (khugepaged_thread) {
1925 kthread_stop(khugepaged_thread);
1926 khugepaged_thread = NULL;
1927 }
1928 fail:
1929 mutex_unlock(&khugepaged_mutex);
1930 return err;
1931 }