// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
	SCAN_EXCEED_SHARED_PTE,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_PAGE_HAS_PRIVATE,
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default scan 8*512 ptes (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte mapped THPs
 * @pte_mapped_thp: address array corresponding to pte mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls if khugepaged should collapse hugepages over
 * any unmapped ptes in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 * reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none will instead potentially reduce the
 * free memory in the system during the khugepaged scan.
 */
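/*
 * Worked example added for illustration (not in the original source): on
 * x86-64 with 4 KiB base pages a PMD-sized huge page spans HPAGE_PMD_NR ==
 * 512 ptes, so the default max_ptes_none of 511 lets khugepaged collapse a
 * range with only a single pte populated, growing RSS by up to 511 small
 * pages for that range, while max_ptes_none == 0 collapses only fully
 * populated ranges.
 */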
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma become good for khugepaged to scan,
		 * register it here without waiting a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}
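/*
 * Editorial illustration (assuming x86-64 with 4 KiB base pages, where
 * HPAGE_PMD_NR == 512): the defaults above evaluate to pages_to_scan = 4096,
 * max_ptes_none = 511, max_ptes_swap = 64 and max_ptes_shared = 256.
 */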
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}
static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if (!transhuge_vma_enabled(vma, vm_flags))
		return false;

	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
				vma->vm_pgoff, HPAGE_PMD_NR))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma);

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Only regular file is valid */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_EXEC)) {
		struct inode *inode = vma->vm_file->f_inode;

		return !inode_is_open_for_write(inode) &&
			S_ISREG(inode->i_mode);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;

	return !(vm_flags & VM_NO_KHUGEPAGED);
}
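/*
 * Editorial note (not in the original source): the IS_ALIGNED() check above
 * requires a file-backed vma's virtual start and file offset to be congruent
 * modulo HPAGE_PMD_NR pages; e.g. mapping file offset 0 at a huge-page
 * aligned address qualifies, while a mapping shifted by one small page does
 * not, since that range could never be mapped by a single PMD.
 */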
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}
static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}
static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}
static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
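/*
 * Editorial illustration (not in the original source): an order-0 anonymous
 * page mapped by exactly one pte and not in the swap cache has
 * total_mapcount() == 1, so any page_count() above 1 indicates an extra pin
 * (e.g. GUP) and the collapse path below bails out with SCAN_PAGE_COUNT.
 */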
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;

			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already.
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing an hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young pte to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}

	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_enabled())
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
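/*
 * Editorial note (not in the original source): GFP_TRANSHUGE includes
 * __GFP_DIRECT_RECLAIM while GFP_TRANSHUGE_LIGHT does not, so with the
 * khugepaged/defrag sysfs knob enabled the collapse allocation may stall in
 * reclaim/compaction, and with it disabled the allocation fails fast when no
 * contiguous huge-page-sized block is free.
 */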
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
/*
 * If mmap_lock temporarily dropped, revalidate vma
 * before taking mmap_lock.
 * Return 0 if succeeds, otherwise return a non-zero
 * value (scan code).
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out_up_write;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out_up_write;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out_up_write;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), but
	 * the smp_wmb() inside __SetPageUptodate() can be reused to
	 * avoid the copy_huge_page writes to become visible after
	 * the set_pmd_at() write.
	 */
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	spinlock_t *ptl;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we can also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that falls outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate hugepage from the node that has the
		 * max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child has
		 * PageDoubleMap flag cleared and dec the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such case is ephemeral we could always retry collapse
		 * later. However it may report false positive if the page
		 * has excessive GUP pins (i.e. 512). Anyway the same check
		 * will be done again later the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);

	return 0;
}
/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}
static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return;

	if (!mmap_write_trylock(mm))
		return;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
}
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that vma->anon_vma check is racy: it can be set up after
		 * the check but before we took mmap_lock by the fault path.
		 * But page lock would prevent establishing any new ptes of the
		 * page, so we are safe.
		 *
		 * An alternative would be drop the check, but check that page
		 * table is clear before calling pmdp_collapse_flush() under
		 * ptl. It has higher chance to recover THP for the VMA, but
		 * has higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that collapse on
 * @start: collapse start address
 * @hpage: new allocated huge page for collapse
 * @node: appointed node the new huge page allocate from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
		struct file *file, pgoff_t start,
		struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOALLOC)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (PageWriteback(page)) {
				xas_unlock_irq(&xas);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && (PageDirty(page) ||
				  PageWriteback(page))) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something go wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
		/*
		 * Paired with smp_mb() in do_dentry_open() to ensure
		 * i_writecount is up to date and the update to nr_thps is
		 * visible. Ensures the page cache will be truncated if the
		 * file is opened writable.
		 */
		smp_mb();
		if (inode_is_open_for_write(mapping->host)) {
			result = SCAN_FAIL;
			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
			goto xa_locked;
		}
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	/* TODO: tracepoints */
}
*mm
,
1991 struct file
*file
, pgoff_t start
, struct page
**hpage
)
1993 struct page
*page
= NULL
;
1994 struct address_space
*mapping
= file
->f_mapping
;
1995 XA_STATE(xas
, &mapping
->i_pages
, start
);
1997 int node
= NUMA_NO_NODE
;
1998 int result
= SCAN_SUCCEED
;
2002 memset(khugepaged_node_load
, 0, sizeof(khugepaged_node_load
));
2004 xas_for_each(&xas
, page
, start
+ HPAGE_PMD_NR
- 1) {
2005 if (xas_retry(&xas
, page
))
2008 if (xa_is_value(page
)) {
2009 if (++swap
> khugepaged_max_ptes_swap
) {
2010 result
= SCAN_EXCEED_SWAP_PTE
;
2016 if (PageTransCompound(page
)) {
2017 result
= SCAN_PAGE_COMPOUND
;
2021 node
= page_to_nid(page
);
2022 if (khugepaged_scan_abort(node
)) {
2023 result
= SCAN_SCAN_ABORT
;
2026 khugepaged_node_load
[node
]++;
2028 if (!PageLRU(page
)) {
2029 result
= SCAN_PAGE_LRU
;
2033 if (page_count(page
) !=
2034 1 + page_mapcount(page
) + page_has_private(page
)) {
2035 result
= SCAN_PAGE_COUNT
;
2040 * We probably should check if the page is referenced here, but
2041 * nobody would transfer pte_young() to PageReferenced() for us.
2042 * And rmap walk here is just too costly...
2047 if (need_resched()) {
2054 if (result
== SCAN_SUCCEED
) {
2055 if (present
< HPAGE_PMD_NR
- khugepaged_max_ptes_none
) {
2056 result
= SCAN_EXCEED_NONE_PTE
;
2058 node
= khugepaged_find_target_node();
2059 collapse_file(mm
, file
, start
, hpage
, node
);
2063 /* TODO: tracepoints */
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;

			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
	bool wait = true;

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
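	/*
	 * Worked example added for illustration (assuming x86-64 with 4 KiB
	 * pages, pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3 and a single
	 * populated zone): 512 * 1 * 2 + 512 * 1 * 3 * 3 = 5632 pages, so
	 * recommended_min is about 22 MiB before the 5%-of-lowmem clamp below.
	 */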
	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}
void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}