mm/khugepaged.c

// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default: collapse a hugepage if there is at least one pte mapped,
 * just as would have happened had the vma been large enough during
 * page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, potentially increasing the memory footprint
 * of the vmas it scans. When max_ptes_none is 0, khugepaged will not
 * reduce the free memory available in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the free
 * memory in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
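
/*
 * Illustrative usage, not part of this file: the attribute group above
 * is registered under the transparent_hugepage kobject, so with
 * CONFIG_SYSFS the knobs appear as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/* and can be tuned
 * from userspace, e.g.:
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 1000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *   cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */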

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
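
/*
 * Illustrative arithmetic for the defaults above (assuming x86-64 with
 * 4K base pages, where HPAGE_PMD_NR == 512): pages_to_scan == 4096,
 * max_ptes_none == 511 and max_ptes_swap == 64, i.e. scan up to eight
 * pmd ranges per pass, tolerate an almost entirely unmapped range, and
 * swap in at most 64 ptes per candidate collapse.
 */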

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
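
/*
 * Illustrative arithmetic for the hstart/hend rounding above (assuming
 * 4K base pages and a 2M PMD size, so ~HPAGE_PMD_MASK == 0x1fffff):
 *
 *   vm_start = 0x612345000  ->  hstart = 0x612400000 (rounded up)
 *   vm_end   = 0x613456000  ->  hend   = 0x613400000 (rounded down)
 *
 * Only when hstart < hend does the vma cover at least one fully
 * aligned 2M range that is worth registering with khugepaged.
 */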

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged
		 * has finished working on the pagetables under the
		 * mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}

static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
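
/*
 * Illustrative example of the page_count() test above, not from the
 * kernel source: an anonymous page mapped by exactly one pte holds one
 * reference for that mapping, plus one more if it also sits in the
 * swap cache, hence the expected count of 1 + PageSwapCache(page).
 * A concurrent gup pin (or a second mapping) raises the count beyond
 * that, and the collapse aborts with SCAN_PAGE_COUNT instead of
 * copying a page somebody else may still be writing through the pin.
 */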

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
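
/*
 * Illustrative example, not from the kernel source: suppose all pages
 * counted so far in this scan came from node 0 and the next page sits
 * on node 2. With node_reclaim_mode enabled and node_distance(2, 0)
 * reported as 32, which exceeds the usual node_reclaim_distance of 30,
 * khugepaged_scan_abort() returns true and the scan stops with
 * SCAN_SCAN_ABORT: whichever node got the hugepage, some accesses
 * would become expensively remote.
 */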

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
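
/*
 * Illustrative example, not from the kernel source: with
 * khugepaged_node_load == {256, 256} two passes in a row tie. The
 * first pass picks node 0 (the first maximum) and remembers it in
 * last_khugepaged_target_node; on the next tie the balancing loop
 * resumes the search after node 0 and picks node 1, spreading hugepage
 * allocations across equally loaded nodes.
 */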

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_sem was temporarily dropped, revalidate the vma
 * after retaking it.
 * Return 0 if it succeeds, otherwise return a non-zero value
 * (scan code).
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
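
/*
 * Illustrative arithmetic for the "enough young ptes" cut-off above
 * (assuming 4K base pages, so HPAGE_PMD_NR == 512): swap-in is only
 * attempted when at least HPAGE_PMD_NR/2 == 256 of the scanned ptes
 * were referenced, i.e. the range must look at least half hot before
 * khugepaged pays for faulting the swapped-out remainder back in.
 */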

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = SCAN_ANY_PROCESS;
	if (!mmget_still_valid(mm))
		goto out;
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP.
 * Then khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

/**
 * Try to collapse a pte-mapped THP for mm at address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in
 * as pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage = NULL;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		return;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		if (!page || !PageCompound(page))
			goto abort;

		if (!hpage) {
			hpage = compound_head(page);
			/*
			 * The mapping of the THP should not change.
			 *
			 * Note that uprobe, debugger, or MAP_PRIVATE may
			 * change the page table, but the new page will
			 * not pass PageCompound() check.
			 */
			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
				goto abort;
		}

		/*
		 * Confirm the page maps to the correct subpage.
		 *
		 * Note that uprobe, debugger, or MAP_PRIVATE may change
		 * the page table, but the new page will not pass
		 * PageCompound() check.
		 */
		if (WARN_ON(hpage + i != page))
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (hpage) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, addr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!down_write_trylock(&mm->mmap_sem))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	up_write(&mm->mmap_sem);
	return 0;
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * down_write(mmap_sem) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we took mmap_sem by the fault
		 * path. But page lock would prevent establishing any new ptes
		 * of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but to check that
		 * the page table is clear before calling pmdp_collapse_flush()
		 * under ptl. It has a higher chance to recover THP for the
		 * VMA, but has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_sem while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
		struct file *file, pgoff_t start,
		struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			mem_cgroup_cancel_charge(new_page, memcg, true);
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  PAGE_SIZE);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	if (is_shmem)
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
	else {
		__inc_node_page_state(new_page, NR_FILE_THPS);
		filemap_nr_thps_inc(mapping);
	}

	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_node_page_state(zone->zone_pgdat,
					      NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		mem_cgroup_commit_charge(new_page, memcg, false, true);

		if (is_shmem) {
			set_page_dirty(new_page);
			lru_cache_add_anon(new_page);
		} else {
			lru_cache_add_file(new_page);
		}
		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
1905#else
579c571e
SL
1906static void khugepaged_scan_file(struct mm_struct *mm,
1907 struct file *file, pgoff_t start, struct page **hpage)
f3f0e1d2
KS
1908{
1909 BUILD_BUG();
1910}
1911
1912static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1913{
1914 return 0;
1915}
1916#endif
1917
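/*
 * Scan up to @pages PTEs, resuming from the global khugepaged_scan
 * cursor and advancing it as VMAs and mms are exhausted. Called
 * (and returns) with khugepaged_mm_lock held, but drops it for the
 * duration of the scan; returns the amount of progress made.
 */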
1918static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1919 struct page **hpage)
1920 __releases(&khugepaged_mm_lock)
1921 __acquires(&khugepaged_mm_lock)
1922{
1923 struct mm_slot *mm_slot;
1924 struct mm_struct *mm;
1925 struct vm_area_struct *vma;
1926 int progress = 0;
1927
1928 VM_BUG_ON(!pages);
35f3aa39 1929 lockdep_assert_held(&khugepaged_mm_lock);
1930
1931 if (khugepaged_scan.mm_slot)
1932 mm_slot = khugepaged_scan.mm_slot;
1933 else {
1934 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1935 struct mm_slot, mm_node);
1936 khugepaged_scan.address = 0;
1937 khugepaged_scan.mm_slot = mm_slot;
1938 }
1939 spin_unlock(&khugepaged_mm_lock);
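	/*
	 * First collapse any pte-mapped THPs recorded for this mm via
	 * khugepaged_add_pte_mapped_thp() while mmap_sem was unavailable.
	 */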
27e1f827 1940 khugepaged_collapse_pte_mapped_thps(mm_slot);
1941
1942 mm = mm_slot->mm;
1943 /*
1944 * Don't wait for the mmap semaphore (to avoid long wait times). Just move to
1945 * the next mm on the list.
1946 */
1947 vma = NULL;
1948 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1949 goto breakouterloop_mmap_sem;
1950 if (likely(!khugepaged_test_exit(mm)))
1951 vma = find_vma(mm, khugepaged_scan.address);
1952
1953 progress++;
1954 for (; vma; vma = vma->vm_next) {
1955 unsigned long hstart, hend;
1956
1957 cond_resched();
1958 if (unlikely(khugepaged_test_exit(mm))) {
1959 progress++;
1960 break;
1961 }
50f8b92f 1962 if (!hugepage_vma_check(vma, vma->vm_flags)) {
1963skip:
1964 progress++;
1965 continue;
1966 }
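		/* Clamp to the PMD-aligned span the VMA could map hugely. */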
1967 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1968 hend = vma->vm_end & HPAGE_PMD_MASK;
1969 if (hstart >= hend)
1970 goto skip;
1971 if (khugepaged_scan.address > hend)
1972 goto skip;
1973 if (khugepaged_scan.address < hstart)
1974 khugepaged_scan.address = hstart;
1975 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1976
1977 while (khugepaged_scan.address < hend) {
1978 int ret;
1979 cond_resched();
1980 if (unlikely(khugepaged_test_exit(mm)))
1981 goto breakouterloop;
1982
1983 VM_BUG_ON(khugepaged_scan.address < hstart ||
1984 khugepaged_scan.address + HPAGE_PMD_SIZE >
1985 hend);
99cb0dbd 1986 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
e496cf3d 1987 struct file *file;
1988 pgoff_t pgoff = linear_page_index(vma,
1989 khugepaged_scan.address);
1990
1991 if (shmem_file(vma->vm_file)
1992 && !shmem_huge_enabled(vma))
1993 goto skip;
1994 file = get_file(vma->vm_file);
1995 up_read(&mm->mmap_sem);
1996 ret = 1;
579c571e 1997 khugepaged_scan_file(mm, file, pgoff, hpage);
1998 fput(file);
1999 } else {
2000 ret = khugepaged_scan_pmd(mm, vma,
2001 khugepaged_scan.address,
2002 hpage);
2003 }
2004 /* move to next address */
2005 khugepaged_scan.address += HPAGE_PMD_SIZE;
2006 progress += HPAGE_PMD_NR;
2007 if (ret)
2008 /* we released mmap_sem so break loop */
2009 goto breakouterloop_mmap_sem;
2010 if (progress >= pages)
2011 goto breakouterloop;
2012 }
2013 }
2014breakouterloop:
2015 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2016breakouterloop_mmap_sem:
2017
2018 spin_lock(&khugepaged_mm_lock);
2019 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2020 /*
2021 * Release the current mm_slot if this mm is about to die, or
2022 * if we scanned all vmas of this mm.
2023 */
2024 if (khugepaged_test_exit(mm) || !vma) {
2025 /*
2026 * Make sure that if mm_users is reaching zero while
2027 * khugepaged runs here, khugepaged_exit will find
2028 * mm_slot not pointing to the exiting mm.
2029 */
2030 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2031 khugepaged_scan.mm_slot = list_entry(
2032 mm_slot->mm_node.next,
2033 struct mm_slot, mm_node);
2034 khugepaged_scan.address = 0;
2035 } else {
2036 khugepaged_scan.mm_slot = NULL;
2037 khugepaged_full_scans++;
2038 }
2039
2040 collect_mm_slot(mm_slot);
2041 }
2042
2043 return progress;
2044}
2045
2046static int khugepaged_has_work(void)
2047{
2048 return !list_empty(&khugepaged_scan.mm_head) &&
2049 khugepaged_enabled();
2050}
2051
2052static int khugepaged_wait_event(void)
2053{
2054 return !list_empty(&khugepaged_scan.mm_head) ||
2055 kthread_should_stop();
2056}
2057
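/*
 * One full scan pass: keep scanning mm_slots until
 * khugepaged_pages_to_scan PTEs have been covered, stopping early
 * if the thread should stop or freeze, or preallocation fails.
 */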
2058static void khugepaged_do_scan(void)
2059{
2060 struct page *hpage = NULL;
2061 unsigned int progress = 0, pass_through_head = 0;
2062 unsigned int pages = khugepaged_pages_to_scan;
2063 bool wait = true;
2064
2065 barrier(); /* read khugepaged_pages_to_scan once into the local "pages"; sysfs may change it concurrently */
2066
2067 while (progress < pages) {
2068 if (!khugepaged_prealloc_page(&hpage, &wait))
2069 break;
2070
2071 cond_resched();
2072
2073 if (unlikely(kthread_should_stop() || try_to_freeze()))
2074 break;
2075
2076 spin_lock(&khugepaged_mm_lock);
2077 if (!khugepaged_scan.mm_slot)
2078 pass_through_head++;
2079 if (khugepaged_has_work() &&
2080 pass_through_head < 2)
2081 progress += khugepaged_scan_mm_slot(pages - progress,
2082 &hpage);
2083 else
2084 progress = pages;
2085 spin_unlock(&khugepaged_mm_lock);
2086 }
2087
2088 if (!IS_ERR_OR_NULL(hpage))
2089 put_page(hpage);
2090}
2091
2092static bool khugepaged_should_wakeup(void)
2093{
2094 return kthread_should_stop() ||
2095 time_after_eq(jiffies, khugepaged_sleep_expire);
2096}
2097
2098static void khugepaged_wait_work(void)
2099{
2100 if (khugepaged_has_work()) {
2101 const unsigned long scan_sleep_jiffies =
2102 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2103
2104 if (!scan_sleep_jiffies)
2105 return;
2106
2107 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2108 wait_event_freezable_timeout(khugepaged_wait,
2109 khugepaged_should_wakeup(),
2110 scan_sleep_jiffies);
2111 return;
2112 }
2113
2114 if (khugepaged_enabled())
2115 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2116}
2117
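/* The khugepaged thread: alternate between scanning and sleeping. */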
2118static int khugepaged(void *none)
2119{
2120 struct mm_slot *mm_slot;
2121
2122 set_freezable();
2123 set_user_nice(current, MAX_NICE);
2124
2125 while (!kthread_should_stop()) {
2126 khugepaged_do_scan();
2127 khugepaged_wait_work();
2128 }
2129
2130 spin_lock(&khugepaged_mm_lock);
2131 mm_slot = khugepaged_scan.mm_slot;
2132 khugepaged_scan.mm_slot = NULL;
2133 if (mm_slot)
2134 collect_mm_slot(mm_slot);
2135 spin_unlock(&khugepaged_mm_lock);
2136 return 0;
2137}
2138
2139static void set_recommended_min_free_kbytes(void)
2140{
2141 struct zone *zone;
2142 int nr_zones = 0;
2143 unsigned long recommended_min;
2144
2145 for_each_populated_zone(zone) {
2146 /*
2147 * We don't need to worry about fragmentation of
2148 * ZONE_MOVABLE since it only has movable pages.
2149 */
2150 if (zone_idx(zone) > gfp_zone(GFP_USER))
2151 continue;
2152
b46e756f 2153 nr_zones++;
b7d349c7 2154 }
2155
2156 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2157 recommended_min = pageblock_nr_pages * nr_zones * 2;
2158
2159 /*
2160 * Make sure that on average at least two pageblocks are almost free
2161 * of another type, one for a migratetype to fall back to and a
2162 * second to avoid subsequent fallbacks of other types. There are 3
2163 * MIGRATE_TYPES we care about.
2164 */
2165 recommended_min += pageblock_nr_pages * nr_zones *
2166 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
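	/*
	 * For example (a typical x86-64 config, not guaranteed): with
	 * 2MB pageblocks (512 x 4K pages) and a single zone this asks
	 * for 512*2 + 512*3*3 = 5632 pages, i.e. ~22MB, before the 5%
	 * lowmem cap below.
	 */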
2167
2168 /* never allow reserving more than 5% of the lowmem */
2169 recommended_min = min(recommended_min,
2170 (unsigned long) nr_free_buffer_pages() / 20);
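	/* convert pages to KiB: PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1KiB) */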
2171 recommended_min <<= (PAGE_SHIFT-10);
2172
2173 if (recommended_min > min_free_kbytes) {
2174 if (user_min_free_kbytes >= 0)
2175 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2176 min_free_kbytes, recommended_min);
2177
2178 min_free_kbytes = recommended_min;
2179 }
2180 setup_per_zone_wmarks();
2181}
2182
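/*
 * Start or stop the khugepaged thread so that it matches
 * khugepaged_enabled(); called at init time and when the sysfs
 * "enabled" knob changes.
 */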
2183int start_stop_khugepaged(void)
2184{
2185 static struct task_struct *khugepaged_thread __read_mostly;
2186 static DEFINE_MUTEX(khugepaged_mutex);
2187 int err = 0;
2188
2189 mutex_lock(&khugepaged_mutex);
2190 if (khugepaged_enabled()) {
2191 if (!khugepaged_thread)
2192 khugepaged_thread = kthread_run(khugepaged, NULL,
2193 "khugepaged");
2194 if (IS_ERR(khugepaged_thread)) {
2195 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2196 err = PTR_ERR(khugepaged_thread);
2197 khugepaged_thread = NULL;
2198 goto fail;
2199 }
2200
2201 if (!list_empty(&khugepaged_scan.mm_head))
2202 wake_up_interruptible(&khugepaged_wait);
2203
2204 set_recommended_min_free_kbytes();
2205 } else if (khugepaged_thread) {
2206 kthread_stop(khugepaged_thread);
2207 khugepaged_thread = NULL;
2208 }
2209fail:
2210 mutex_unlock(&khugepaged_mutex);
2211 return err;
2212}