// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_EXCEED_SWAP_PTE,
	SCAN_EXCEED_SHARED_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PTE_UFFD_WP,
	SCAN_PAGE_RO,
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
	SCAN_TRUNCATED,
	SCAN_PAGE_HAS_PRIVATE,
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default: collapse a hugepage if at least one pte is mapped, just as
 * would have happened had the vma been large enough at page fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
static unsigned int khugepaged_max_ptes_shared __read_mostly;
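
/*
 * Illustrative usage (not part of the original file): with CONFIG_SYSFS
 * these tunables are exposed under
 * /sys/kernel/mm/transparent_hugepage/khugepaged/, e.g.:
 *
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	echo 0    > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 *
 * The store handlers below parse such writes with kstrtouint()/kstrtoul().
 */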

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

#define MAX_PTE_MAPPED_THP 8

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 * @nr_pte_mapped_thp: number of pte-mapped THPs in this mm
 * @pte_mapped_thp: array of addresses of the pte-mapped THPs
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;

	/* pte-mapped THP in this mm */
	int nr_pte_mapped_thp;
	unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned int msecs;
	int err;

	err = kstrtouint(buf, 10, &msecs);
	if (err)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int pages;
	int err;

	err = kstrtouint(buf, 10, &pages);
	if (err || !pages)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over ranges that still contain unmapped ptes, potentially increasing
 * the memory footprint of the vmas. When max_ptes_none is 0, khugepaged
 * will not reduce the available free memory in the system as it runs.
 * Increasing max_ptes_none instead potentially reduces the free memory
 * in the system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);
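
/*
 * Worked example (illustrative, x86-64 with 4KiB pages): HPAGE_PMD_NR is
 * 512, so max_ptes_none ranges over 0..511. With the default of 511, a
 * 2MiB-aligned extent backed by a single present pte may be collapsed,
 * instantiating up to 511 * 4KiB of previously unmapped memory. With
 * max_ptes_none == 0, only fully populated extents are collapsed.
 */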

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);

static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       char *buf)
{
	return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
}

static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
						struct kobj_attribute *attr,
						const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_shared;

	err = kstrtoul(buf, 10, &max_ptes_shared);
	if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_shared = max_ptes_shared;

	return count;
}

static struct kobj_attribute khugepaged_max_ptes_shared_attr =
	__ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
	       khugepaged_max_ptes_shared_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	&khugepaged_max_ptes_shared_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
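
/*
 * Illustrative userspace counterpart (not part of this file): a process
 * opts a mapping in with
 *
 *	madvise(addr, len, MADV_HUGEPAGE);
 *
 * which reaches hugepage_madvise() via the madvise() syscall and, when the
 * vma qualifies, registers the mm with khugepaged immediately.
 */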

int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
	khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;

	return 0;
}
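
/*
 * Resulting defaults on x86-64 (HPAGE_PMD_NR == 512), for illustration:
 * pages_to_scan = 4096, max_ptes_none = 511, max_ptes_swap = 64,
 * max_ptes_shared = 256.
 */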

void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

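/*
 * Descriptive note: mm_users drops to zero when the last user of the
 * address space goes away, so a zero count means the mm is being torn
 * down and khugepaged must not touch its page tables any further.
 */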
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	/* Explicitly disabled through madvise. */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	/* Enabled via shmem mount options or sysfs settings. */
	if (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}

	/* THP settings require madvise. */
	if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
		return false;

	/* Read-only file mappings need to be aligned for THP to work. */
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
	    (vm_flags & VM_DENYWRITE)) {
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (vma_is_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}

int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged only supports read-only non-shmem files.
	 * khugepaged does not yet work on special mappings. And
	 * file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}

void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_lock.
		 */
		mmap_write_lock(mm);
		mmap_write_unlock(mm);
	}
}

static void release_pte_page(struct page *page)
{
	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_is_file_lru(page),
			-compound_nr(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte,
		struct list_head *compound_pagelist)
{
	struct page *page, *tmp;

	while (--_pte >= pte) {
		pte_t pteval = *_pte;

		page = pte_page(pteval);
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
				!PageCompound(page))
			release_pte_page(page);
	}

	list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
		list_del(&page->lru);
		release_pte_page(page);
	}
}

static bool is_refcount_suitable(struct page *page)
{
	int expected_refcount;

	expected_refcount = total_mapcount(page);
	if (PageSwapCache(page))
		expected_refcount += compound_nr(page);

	return page_count(page) == expected_refcount;
}
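
/*
 * Example for is_refcount_suitable() above (illustrative): an anon base
 * page mapped by a parent and its fork()ed child has total_mapcount() == 2
 * and, absent other references, page_count() == 2. An extra GUP pin makes
 * page_count() == 3 != 2, so collapse is refused until the pin is dropped.
 */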

static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte,
					struct list_head *compound_pagelist)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out;
		}

		if (PageCompound(page)) {
			struct page *p;
			page = compound_head(page);

			/*
			 * Check if we have dealt with the compound page
			 * already
			 */
			list_for_each_entry(p, compound_pagelist, lru) {
				if (page == p)
					goto next;
			}
		}

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * The page table that maps the page has been already unlinked
		 * from the page table tree and this process cannot get
		 * an additional pin on the page.
		 *
		 * New pins can come later if the page is shared across fork,
		 * but not from this process. The other process cannot write to
		 * the page, only trigger CoW.
		 */
		if (!is_refcount_suitable(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (!pte_write(pteval) && PageSwapCache(page) &&
				!reuse_swap_page(page, NULL)) {
			/*
			 * Page is in the swap cache and cannot be re-used.
			 * It cannot be collapsed into a THP.
			 */
			unlock_page(page);
			result = SCAN_SWAP_CACHE_PAGE;
			goto out;
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				compound_nr(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		if (PageCompound(page))
			list_add_tail(&page->lru, compound_pagelist);
next:
		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;

		if (pte_write(pteval))
			writable = true;
	}
	if (likely(writable)) {
		if (likely(referenced)) {
			result = SCAN_SUCCEED;
			trace_mm_collapse_huge_page_isolate(page, none_or_zero,
							    referenced, writable, result);
			return 1;
		}
	} else {
		result = SCAN_PAGE_RO;
	}

out:
	release_pte_pages(pte, _pte, compound_pagelist);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl,
				      struct list_head *compound_pagelist)
{
	struct page *src_page, *tmp;
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			if (!PageCompound(src_page))
				release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}

	list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
		list_del(&src_page->lru);
		release_pte_page(src_page);
	}
}

static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}

static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > node_reclaim_distance)
			return true;
	}
	return false;
}
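
/*
 * Example for khugepaged_scan_abort() above (illustrative): with
 * node_reclaim_mode enabled on a two-socket machine, if earlier ptes in the
 * extent hit node 0 and the current page sits on node 1 with
 * node_distance(1, 0) > node_reclaim_distance, the scan is aborted:
 * collapsing would pull all 512 subpages onto a single node, turning the
 * remote portion into cross-node traffic.
 */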

/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balance if several nodes have the same hit record */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
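
/*
 * Example (illustrative): if khugepaged_node_load ends up as {node 2: 256,
 * node 3: 256} for one extent, the first pass picks node 2 (first maximum);
 * if node 2 was also picked last time, the tie-breaking pass advances to
 * node 3, spreading hugepage allocations across equally loaded nodes.
 */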

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_file() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_lock was temporarily dropped, revalidate the vma after
 * re-taking the mmap_lock.
 * Return 0 on success, otherwise return a non-zero scan code.
 */

static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	/* Anon VMA expected */
	if (!vma->anon_vma || vma->vm_ops)
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_lock held to protect against vma changes.
 */

static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0;
	vm_fault_t ret = 0;
	unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);

	for (address = haddr; address < end; address += PAGE_SIZE) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = address,
			.pgoff = linear_page_index(vma, haddr),
			.flags = FAULT_FLAG_ALLOW_RETRY,
			.pmd = pmd,
		};

		vmf.pte = pte_offset_map(pmd, address);
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte)) {
			pte_unmap(vmf.pte);
			continue;
		}
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
		if (ret & VM_FAULT_RETRY) {
			mmap_read_lock(mm);
			if (hugepage_vma_revalidate(mm, haddr, &vma)) {
				/* vma is no longer available, don't continue to swapin */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, haddr) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
	}

	/* Drain LRU add pagevec to remove extra pin on the swapped in pages */
	if (swapped_in)
		lru_add_drain();

	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}

static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced, int unmapped)
{
	LIST_HEAD(compound_pagelist);
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct vm_area_struct *vma;
	struct mmu_notifier_range range;
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_lock read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_lock during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	mmap_read_unlock(mm);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	mmap_read_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_lock locked.
	 * If it fails, we release mmap_lock and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mmap_read_unlock(mm);
		goto out_nolock;
	}

	mmap_read_unlock(mm);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	mmap_write_lock(mm);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
				address, address + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(&range);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte,
			&compound_pagelist);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
			&compound_pagelist);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to prevent the copy_huge_page writes from
	 * becoming visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	lru_cache_add_inactive_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	mmap_write_unlock(mm);
out_nolock:
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	goto out_up_write;
}
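
/*
 * Locking summary for collapse_huge_page() above (descriptive only): the
 * mmap_lock read lock is dropped for the potentially slow hugepage
 * allocation, re-taken for swap-in, then upgraded to the write lock (with
 * the vma and pmd revalidated after each re-acquisition) before the pmd is
 * collapse-flushed, the ptes are isolated and copied, and the huge pmd is
 * installed.
 */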

static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, result = 0, referenced = 0;
	int none_or_zero = 0, shared = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				/*
				 * Always be strict with uffd-wp
				 * enabled swap entries. Please see
				 * comment below for pte_uffd_wp().
				 */
				if (pte_swp_uffd_wp(pteval)) {
					result = SCAN_PTE_UFFD_WP;
					goto out_unmap;
				}
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_uffd_wp(pteval)) {
			/*
			 * Don't collapse the page if any of the small
			 * PTEs are armed with uffd write protection.
			 * Here we could also mark the new huge pmd as
			 * write protected if any of the small ones is
			 * marked but that could bring unknown
			 * userfault messages that fall outside of
			 * the registered range. So, just be simple.
			 */
			result = SCAN_PTE_UFFD_WP;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		if (page_mapcount(page) > 1 &&
				++shared > khugepaged_max_ptes_shared) {
			result = SCAN_EXCEED_SHARED_PTE;
			goto out_unmap;
		}

		page = compound_head(page);

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node that
		 * has the max hit record.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * Check if the page has any GUP (or other external) pins.
		 *
		 * Here the check is racy: it may see total_mapcount > refcount
		 * in some cases.
		 * For example, one process with one forked child process.
		 * The parent has the PMD split due to MADV_DONTNEED, then
		 * the child is trying to unmap the whole PMD, but khugepaged
		 * may be scanning the parent between the child clearing the
		 * PageDoubleMap flag and decrementing the mapcount. So
		 * khugepaged may see total_mapcount > refcount.
		 *
		 * But such a case is ephemeral and we could always retry the
		 * collapse later. However it may report a false positive if
		 * the page has excessive GUP pins (i.e. 512). Anyway the same
		 * check will be done again later, so the risk seems low.
		 */
		if (!is_refcount_suitable(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (!writable) {
		result = SCAN_PAGE_RO;
	} else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		ret = 1;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_lock released */
		collapse_huge_page(mm, address, hpage, node,
				referenced, unmapped);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}

static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}

#ifdef CONFIG_SHMEM
/*
 * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
 * khugepaged should try to collapse the page table.
 */
static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
					 unsigned long addr)
{
	struct mm_slot *mm_slot;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
		mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
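
/*
 * Note (descriptive): if the per-mm pte_mapped_thp[] array is already full
 * (MAX_PTE_MAPPED_THP == 8 pending addresses), the address is silently
 * dropped and the function still returns 0; the page table simply stays
 * pte-mapped until a later retraction attempt gets another chance.
 */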

/**
 * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
 * address haddr.
 *
 * @mm: process address space where collapse happens
 * @addr: THP collapse address
 *
 * This function checks whether all the PTEs in the PMD are pointing to the
 * right THP. If so, retract the page table so the THP can refault in as
 * pmd-mapped.
 */
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	struct vm_area_struct *vma = find_vma(mm, haddr);
	struct page *hpage;
	pte_t *start_pte, *pte;
	pmd_t *pmd, _pmd;
	spinlock_t *ptl;
	int count = 0;
	int i;

	if (!vma || !vma->vm_file ||
	    vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
		return;

	/*
	 * This vm_flags may not have VM_HUGEPAGE if the page was not
	 * collapsed by this mm. But we can still collapse if the page is
	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
	 * will not fail the vma for missing VM_HUGEPAGE.
	 */
	if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
		return;

	hpage = find_lock_page(vma->vm_file->f_mapping,
			       linear_page_index(vma, haddr));
	if (!hpage)
		return;

	if (!PageHead(hpage))
		goto drop_hpage;

	pmd = mm_find_pmd(mm, haddr);
	if (!pmd)
		goto drop_hpage;

	start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);

	/* step 1: check all mapped PTEs are to the right huge page */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		/* empty pte, skip */
		if (pte_none(*pte))
			continue;

		/* page swapped out, abort */
		if (!pte_present(*pte))
			goto abort;

		page = vm_normal_page(vma, addr, *pte);

		/*
		 * Note that uprobe, debugger, or MAP_PRIVATE may change the
		 * page table, but the new page will not be a subpage of hpage.
		 */
		if (hpage + i != page)
			goto abort;
		count++;
	}

	/* step 2: adjust rmap */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		struct page *page;

		if (pte_none(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		page_remove_rmap(page, false);
	}

	pte_unmap_unlock(start_pte, ptl);

	/* step 3: set proper refcount and mm_counters. */
	if (count) {
		page_ref_sub(hpage, count);
		add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
	}

	/* step 4: collapse pmd */
	ptl = pmd_lock(vma->vm_mm, pmd);
	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
	spin_unlock(ptl);
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(_pmd));

drop_hpage:
	unlock_page(hpage);
	put_page(hpage);
	return;

abort:
	pte_unmap_unlock(start_pte, ptl);
	goto drop_hpage;
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;
	int i;

	if (likely(mm_slot->nr_pte_mapped_thp == 0))
		return 0;

	if (!mmap_write_trylock(mm))
		return -EBUSY;

	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
		collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);

out:
	mm_slot->nr_pte_mapped_thp = 0;
	mmap_write_unlock(mm);
	return 0;
}

static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
		 * got written to. These VMAs are likely not worth investing
		 * mmap_write_lock(mm) in, as the PMD-mapping is likely to be
		 * split later.
		 *
		 * Note that the vma->anon_vma check is racy: it can be set up
		 * after the check but before we take mmap_lock by the fault
		 * path. But the page lock would prevent establishing any new
		 * ptes of the page, so we are safe.
		 *
		 * An alternative would be to drop the check, but to check
		 * that the page table is clear before calling
		 * pmdp_collapse_flush() under ptl. It has a higher chance to
		 * recover THP for the VMA, but has a higher cost too.
		 */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_lock to retract the page table.
		 *
		 * We use trylock due to lock inversion: we need to acquire
		 * mmap_lock while holding the page lock. Fault path does it
		 * in reverse order. Trylock is a way to avoid deadlock.
		 */
		if (mmap_write_trylock(mm)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			mmap_write_unlock(mm);
		} else {
			/* Try again later */
			khugepaged_add_pte_mapped_thp(mm, addr);
		}
	}
	i_mmap_unlock_write(mapping);
}

/**
 * collapse_file - collapse filemap/tmpfs/shmem pages into a huge one.
 *
 * @mm: process address space where collapse happens
 * @file: file that the collapse operates on
 * @start: collapse start address
 * @hpage: newly allocated huge page for collapse
 * @node: appointed node the new huge page is allocated from
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan page cache replacing old pages with the new one
 *    + swap/gup in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the page cache;
 *    + unlock and free huge page;
 */
static void collapse_file(struct mm_struct *mm,
			  struct file *file, pgoff_t start,
			  struct page **hpage, int node)
{
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp;
	struct page *new_page;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
	int nr_none = 0, result = SCAN_SUCCEED;
	bool is_shmem = shmem_file(file);
	int nr;

	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
	count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);

	/* This will be less messy when we use multi-index entries */
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (!xas_error(&xas))
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
		}
	} while (1);

	__SetPageLocked(new_page);
	if (is_shmem)
		__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	xas_set(&xas, start);
	for (index = start; index < end; index++) {
		struct page *page = xas_next(&xas);

		VM_BUG_ON(index != xas.xa_index);
		if (is_shmem) {
			if (!page) {
				/*
				 * Stop if extent has been truncated or
				 * hole-punched, and is now completely
				 * empty.
				 */
				if (index == start) {
					if (!xas_next_entry(&xas, end - 1)) {
						result = SCAN_TRUNCATED;
						goto xa_locked;
					}
					xas_set(&xas, index);
				}
				if (!shmem_charge(mapping->host, 1)) {
					result = SCAN_FAIL;
					goto xa_locked;
				}
				xas_store(&xas, new_page);
				nr_none++;
				continue;
			}

			if (xa_is_value(page) || !PageUptodate(page)) {
				xas_unlock_irq(&xas);
				/* swap in or instantiate fallocated page */
				if (shmem_getpage(mapping->host, index, &page,
						  SGP_NOHUGE)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		} else {	/* !is_shmem */
			if (!page || xa_is_value(page)) {
				xas_unlock_irq(&xas);
				page_cache_sync_readahead(mapping, &file->f_ra,
							  file, index,
							  end - index);
				/* drain pagevecs to help isolate_lru_page() */
				lru_add_drain();
				page = find_lock_page(mapping, index);
				if (unlikely(page == NULL)) {
					result = SCAN_FAIL;
					goto xa_unlocked;
				}
			} else if (PageDirty(page)) {
				/*
				 * khugepaged only works on read-only fd,
				 * so this page is dirty because it hasn't
				 * been flushed since first write. There
				 * won't be new dirty pages.
				 *
				 * Trigger async flush here and hope the
				 * writeback is done when khugepaged
				 * revisits this page.
				 *
				 * This is a one-off situation. We are not
				 * forcing writeback in loop.
				 */
				xas_unlock_irq(&xas);
				filemap_flush(mapping);
				result = SCAN_FAIL;
				goto xa_unlocked;
			} else if (trylock_page(page)) {
				get_page(page);
				xas_unlock_irq(&xas);
			} else {
				result = SCAN_PAGE_LOCK;
				goto xa_locked;
			}
		}
1783
1784 /*
b93b0163 1785 * The page must be locked, so we can drop the i_pages lock
f3f0e1d2
KS
1786 * without racing with truncate.
1787 */
1788 VM_BUG_ON_PAGE(!PageLocked(page), page);
4655e5e5
SL
1789
1790 /* make sure the page is up to date */
1791 if (unlikely(!PageUptodate(page))) {
1792 result = SCAN_FAIL;
1793 goto out_unlock;
1794 }
06a5e126
HD
1795
1796 /*
1797 * If file was truncated then extended, or hole-punched, before
1798 * we locked the first page, then a THP might be there already.
1799 */
1800 if (PageTransCompound(page)) {
1801 result = SCAN_PAGE_COMPOUND;
1802 goto out_unlock;
1803 }
f3f0e1d2
KS
1804
1805 if (page_mapping(page) != mapping) {
1806 result = SCAN_TRUNCATED;
1807 goto out_unlock;
1808 }
f3f0e1d2 1809
4655e5e5
SL
1810 if (!is_shmem && PageDirty(page)) {
1811 /*
1812 * khugepaged only works on read-only fd, so this
1813 * page is dirty because it hasn't been flushed
1814 * since first write.
1815 */
1816 result = SCAN_FAIL;
1817 goto out_unlock;
1818 }
1819
f3f0e1d2
KS
1820 if (isolate_lru_page(page)) {
1821 result = SCAN_DEL_PAGE_LRU;
042a3082 1822 goto out_unlock;
f3f0e1d2
KS
1823 }
1824
99cb0dbd
SL
1825 if (page_has_private(page) &&
1826 !try_to_release_page(page, GFP_KERNEL)) {
1827 result = SCAN_PAGE_HAS_PRIVATE;
2f33a706 1828 putback_lru_page(page);
99cb0dbd
SL
1829 goto out_unlock;
1830 }
1831
f3f0e1d2 1832 if (page_mapped(page))
977fbdcd 1833 unmap_mapping_pages(mapping, index, 1, false);
f3f0e1d2 1834
77da9389
MW
		xas_lock_irq(&xas);
		xas_set(&xas, index);

		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from page cache;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xas_unlock_irq(&xas);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		xas_store(&xas, new_page);
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto xa_unlocked;
	}
	nr = thp_nr_pages(new_page);

	if (is_shmem)
		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
	else {
		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
		filemap_nr_thps_inc(mapping);
	}

	if (nr_none) {
		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
		if (is_shmem)
			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
	}

xa_locked:
	xas_unlock_irq(&xas);
xa_unlocked:

	if (result == SCAN_SUCCEED) {
		struct page *page, *tmp;

		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
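		/*
		 * new_page is a compound page, so new_page + n is the
		 * struct page of its n-th subpage: (index % HPAGE_PMD_NR)
		 * picks the subpage backing file offset "index". Offsets
		 * with no old page (the holes counted in nr_none) are
		 * zero-filled instead of copied.
		 */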
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
				      page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
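		/*
		 * One reference is already held; add HPAGE_PMD_NR - 1 more
		 * so that each of the HPAGE_PMD_NR page cache slots now
		 * pointing at new_page is backed by its own reference.
		 */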
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		if (is_shmem)
			set_page_dirty(new_page);
		lru_cache_add(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		struct page *page;

		/* Something went wrong: roll back page cache changes */
		xas_lock_irq(&xas);
		mapping->nrpages -= nr_none;

		if (is_shmem)
			shmem_uncharge(mapping->host, nr_none);

		xas_set(&xas, start);
		xas_for_each(&xas, page, end - 1) {
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || xas.xa_index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				xas_store(&xas, NULL);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

			/*
			 * Unfreeze the page: restore a count of two, the
			 * page cache reference plus one pin that
			 * putback_lru_page() below will drop.
			 */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
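			/*
			 * xas_pause() lets us drop the i_pages lock so the
			 * page can be unlocked and put back on the LRU; the
			 * walk then resumes after the lock is retaken.
			 */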
			xas_store(&xas, page);
			xas_pause(&xas);
			xas_unlock_irq(&xas);
			unlock_page(page);
			putback_lru_page(page);
			xas_lock_irq(&xas);
		}
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (!IS_ERR_OR_NULL(*hpage))
		mem_cgroup_uncharge(*hpage);
	/* TODO: tracepoints */
}

static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start);
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
		if (xas_retry(&xas, page))
			continue;

		if (xa_is_value(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

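		/*
		 * Expected references: one from the page cache, one per
		 * process mapping the page, and one more if the page has
		 * private data (e.g. buffer heads) attached. Anything else
		 * means someone else holds the page.
		 */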
		if (page_count(page) !=
		    1 + page_mapcount(page) + page_has_private(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And an rmap walk here is just too costly...
		 */

		present++;

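		/*
		 * xas_pause() makes it safe to drop the RCU read lock
		 * inside cond_resched_rcu() and resume the walk afterwards.
		 */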
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_file(mm, file, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_file(struct mm_struct *mm,
		struct file *file, pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}

static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
{
	return 0;
}
#endif

static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	lockdep_assert_held(&khugepaged_mm_lock);

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);
	khugepaged_collapse_pte_mapped_thps(mm_slot);

	mm = mm_slot->mm;
	/*
	 * Don't wait for the mmap_lock (to avoid long wait times). Just move
	 * to the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!mmap_read_trylock(mm)))
		goto breakouterloop_mmap_lock;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
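		/*
		 * Round the VMA inward to PMD boundaries: e.g. with 2 MiB
		 * huge pages, a VMA spanning [0x1ff000, 0x600000) gives
		 * hstart = 0x200000 and hend = 0x600000. Only fully covered
		 * PMD-sized ranges are scanned.
		 */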
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
			goto skip;

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
				struct file *file = get_file(vma->vm_file);
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);

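				/*
				 * The file scan runs without the mmap_lock:
				 * drop it here, and set ret so that the loop
				 * exits through breakouterloop_mmap_lock,
				 * which does not unlock again.
				 */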
				mmap_read_unlock(mm);
				ret = 1;
				khugepaged_scan_file(mm, file, pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_lock so break loop */
				goto breakouterloop_mmap_lock;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_lock:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}

static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	lru_add_drain_all();

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

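		/*
		 * pass_through_head counts how many times the scan has
		 * started over from the head of the mm list; give up after
		 * the second wrap so a single invocation stays bounded.
		 */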
		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}

static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
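	/*
	 * Worked example (assuming 4 KiB pages, so pageblock_nr_pages ==
	 * 512, and MIGRATE_PCPTYPES == 3): with two eligible zones this is
	 * 512 * 2 * 2 + 512 * 2 * 9 = 11264 pages, i.e. 44 MiB, before the
	 * 5%-of-lowmem cap applied below.
	 */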

	/* never allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}