// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>
/* by default, scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped,
 * just as would have happened had the vma been large enough at
 * page fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}

static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the
 * available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in
 * the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
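
/*
 * A worked example of the knob above (illustrative arithmetic, not part
 * of the original source; it assumes the common case of a 2MB huge page
 * built from 512 4KB ptes, i.e. HPAGE_PMD_NR == 512):
 *
 *	max_ptes_none = 511 (the khugepaged_init() default): a range with
 *	a single mapped pte and 511 empty ptes may still be collapsed,
 *	growing resident memory for that range by up to 511 * 4KB ~= 2MB.
 *
 *	max_ptes_none = 0: no empty pte is tolerated, so a collapse never
 *	populates memory that was not already mapped.
 */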
static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};
struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
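
/*
 * The attribute group above is exported under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/.  A minimal userspace
 * sketch of how these files are read and written (illustrative only, not
 * part of the kernel source; error handling trimmed, the 1000ms value is
 * just an example, and writing requires appropriate privileges):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *dir = "/sys/kernel/mm/transparent_hugepage/khugepaged";
 *		char path[256];
 *		unsigned int pages;
 *		FILE *f;
 *
 *		// read how many ptes khugepaged scans per pass
 *		snprintf(path, sizeof(path), "%s/pages_to_scan", dir);
 *		f = fopen(path, "r");
 *		fscanf(f, "%u", &pages);
 *		fclose(f);
 *		printf("pages_to_scan = %u\n", pages);
 *
 *		// shorten the sleep between scan passes to 1 second
 *		snprintf(path, sizeof(path), "%s/scan_sleep_millisecs", dir);
 *		f = fopen(path, "w");
 *		fprintf(f, "1000\n");
 *		fclose(f);
 *		return 0;
 *	}
 */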
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
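
/*
 * Userspace opts a mapping in or out of the behaviour above with
 * madvise(2).  A minimal sketch (illustrative only, not part of the
 * kernel source; the 16MB mapping size is an arbitrary example):
 *
 *	#include <stdlib.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 16UL << 20;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		if (p == MAP_FAILED)
 *			return 1;
 *		// ask khugepaged to consider this range for collapse
 *		madvise(p, len, MADV_HUGEPAGE);
 *		// ... use the memory ...
 *		// opt the range back out again
 *		madvise(p, len, MADV_NOHUGEPAGE);
 *		munmap(p, len);
 *		return 0;
 *	}
 */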
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
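
/*
 * For the common configuration of 2MB huge pages built from 4KB base
 * pages (HPAGE_PMD_NR == 512) the defaults above work out to
 * (illustrative arithmetic, not part of the original source):
 *
 *	pages_to_scan  = 512 * 8  = 4096 ptes scanned per scan pass
 *	max_ptes_none  = 512 - 1  = 511  (collapse even around a single
 *	                                  mapped pte)
 *	max_ptes_swap  = 512 / 8  = 64   (swap in at most 64 ptes per
 *	                                  collapse candidate)
 */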
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}
static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (shmem_file(vma->vm_file) ||
	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
	     vma->vm_file &&
	     (vm_flags & VM_DENYWRITE))) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only files for non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
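
/*
 * The hstart/hend computation above rounds the vma inward to
 * HPAGE_PMD_SIZE boundaries; only if an aligned huge-page-sized range
 * survives the rounding is the mm registered.  A standalone sketch of
 * the same arithmetic (illustrative only; assumes a 2MB HPAGE_PMD_SIZE
 * and example addresses):
 *
 *	#include <stdio.h>
 *
 *	#define HPAGE_PMD_SIZE	(2UL << 20)
 *	#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
 *
 *	int main(void)
 *	{
 *		unsigned long vm_start = 0x7f0000123000UL;
 *		unsigned long vm_end   = 0x7f0000923000UL;
 *		unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 *		unsigned long hend   = vm_end & HPAGE_PMD_MASK;
 *
 *		// hstart = 0x7f0000200000, hend = 0x7f0000800000: three 2MB
 *		// ranges are eligible; the unaligned head and tail are not.
 *		printf("%#lx..%#lx eligible: %d\n", hstart, hend, hstart < hend);
 *		return 0;
 *	}
 */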
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we
		 * return all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
		     nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
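
/*
 * A self-contained model of the node selection above (illustrative only,
 * not kernel code; MAX_NODES and pick_target_node are made-up names):
 * pick the node with the most hits, but when the previous winner ties
 * with a later node, rotate forward so allocations spread across
 * equally-loaded nodes.
 *
 *	#define MAX_NODES 8
 *
 *	static int pick_target_node(const int load[MAX_NODES], int *last)
 *	{
 *		int nid, target = 0, max_value = 0;
 *
 *		for (nid = 0; nid < MAX_NODES; nid++)
 *			if (load[nid] > max_value) {
 *				max_value = load[nid];
 *				target = nid;
 *			}
 *
 *		// round-robin between nodes sharing the same hit count
 *		if (target <= *last)
 *			for (nid = *last + 1; nid < MAX_NODES; nid++)
 *				if (load[nid] == max_value) {
 *					target = nid;
 *					break;
 *				}
 *
 *		*last = target;
 *		return target;
 *	}
 */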
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	}
	return true;
}
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}
static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif /* CONFIG_NUMA */
/*
 * If the mmap_sem was temporarily dropped, revalidate the vma before
 * taking it again.
 * Returns 0 on success, otherwise a non-zero scan result code.
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}
/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swapin, if there is enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
	     vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = SCAN_ANY_PROCESS;
	if (!mmget_still_valid(mm))
		goto out;
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * points to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
/**
 * Try to collapse a pte-mapped THP for mm at address haddr.
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage = NULL;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		return;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		if (!page || !PageCompound(page))
			goto abort;

		if (!hpage) {
			hpage = compound_head(page);
			/*
			 * The mapping of the THP should not change.
			 *
			 * Note that uprobe, debugger, or MAP_PRIVATE may
			 * change the page table, but the new page will
			 * not pass PageCompound() check.
			 */
			if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping))
				goto abort;
		}

		/*
		 * Confirm the page maps to the correct subpage.
		 *
		 * Note that uprobe, debugger, or MAP_PRIVATE may change
		 * the page table, but the new page will not pass
		 * PageCompound() check.
		 */
		if (WARN_ON(hpage + i != page))
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, addr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
}
static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!down_write_trylock(&mm->mmap_sem))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	up_write(&mm->mmap_sem);
	return 0;
}
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * down_write(mmap_sem) as PMD-mapping is likely to be split
		 * later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we take mmap_sem by the fault
		 * path. But the page lock would prevent establishing any new
		 * ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but check that
		 * the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. It has a higher chance to
		 * recover THP for the VMA, but has a higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		pmd = mm_find_pmd(vma->vm_mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_sem while holding page lock. Fault path does it in
		 * reverse order. Trylock is a way to avoid deadlock.
		 */
		if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
			spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
			/* assume page table is clear */
			_pmd = pmdp_collapse_flush(vma, addr, pmd);
			spin_unlock(ptl);
			up_write(&vma->vm_mm->mmap_sem);
			mm_dec_nr_ptes(vma->vm_mm);
			pte_free(vma->vm_mm, pmd_pgtable(_pmd));
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(vma->vm_mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}
/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed;
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
		struct file *file, pgoff_t start,
		struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			mem_cgroup_cancel_charge(new_page, memcg, true);
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  PAGE_SIZE);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);

		/* make sure the page is up to date */
		if (unlikely(!PageUptodate(page))) {
			result = SCAN_FAIL;
			goto out_unlock;
		}

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (!is_shmem && PageDirty(page)) {
			/*
			 * khugepaged only works on read-only fd, so this
			 * page is dirty because it hasn't been flushed
			 * since first write.
			 */
			result = SCAN_FAIL;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_has_private(page) &&
		    !try_to_release_page(page, GFP_KERNEL)) {
			result = SCAN_PAGE_HAS_PRIVATE;
			putback_lru_page(page);
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}

	if (is_shmem)
		__inc_node_page_state(new_page, NR_SHMEM_THPS);
	else {
		__inc_node_page_state(new_page, NR_FILE_THPS);
		filemap_nr_thps_inc(mapping);
	}

	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_node_page_state(zone->zone_pgdat,
					      NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		mem_cgroup_commit_charge(new_page, memcg, false, true);

		if (is_shmem) {
			set_page_dirty(new_page);
			lru_cache_add_anon(new_page);
		} else {
			lru_cache_add_file(new_page);
		}
		count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}
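
/*
 * A toy model of the publish-then-rollback scheme used above
 * (illustrative only, not kernel code; toy_cache, toy_collapse and the
 * can_take callback are made-up names): the "cache" is a plain array,
 * and the old entries are saved aside so a failed collapse can put
 * every one of them back exactly where it was.
 *
 *	#include <string.h>
 *
 *	#define RANGE 8
 *
 *	struct toy_cache { void *slot[RANGE]; };
 *
 *	// Returns 0 on success; on failure every old entry is restored.
 *	static int toy_collapse(struct toy_cache *c, void *huge,
 *				int (*can_take)(void *old))
 *	{
 *		void *saved[RANGE];
 *		int i;
 *
 *		memcpy(saved, c->slot, sizeof(saved));	// keep old entries around
 *		for (i = 0; i < RANGE; i++) {
 *			if (saved[i] && !can_take(saved[i]))
 *				goto rollback;		// e.g. entry busy elsewhere
 *			c->slot[i] = huge;		// publish the new page
 *		}
 *		return 0;				// caller now copies and frees
 *
 *	rollback:
 *		memcpy(c->slot, saved, sizeof(saved));	// put holes and pages back
 *		return -1;
 *	}
 */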
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	return 0;
}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times). Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;

			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

				if (shmem_file(vma->vm_file)
				    && !shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}
static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
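
/*
 * Worked example of the computation above (illustrative arithmetic, not
 * part of the original source): on x86-64 with 4KB base pages,
 * pageblock_nr_pages is 512 (one 2MB pageblock) and MIGRATE_PCPTYPES is
 * 3.  With, say, nr_zones = 2 populated zones:
 *
 *	recommended_min  = 512 * 2 * 2		=  2048 pages
 *	recommended_min += 512 * 2 * 3 * 3	= 11264 pages total
 *					(then capped at 5% of lowmem)
 *	recommended_min <<= (PAGE_SHIFT - 10)	-> 11264 * 4 = 45056 kB
 *
 * i.e. min_free_kbytes would be raised to roughly 44MB if it was lower.
 */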
int start_stop_khugepaged(void)
{
	static struct task_struct *khugepaged_thread __read_mostly;
	static DEFINE_MUTEX(khugepaged_mutex);
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}