// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
#include <linux/memory-tiers.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
#include "swap.h"

#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
			bool smaps, bool in_pf, bool enforce_sysfs)
{
	if (!vma->vm_mm)		/* vdso */
		return false;

	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	/*
	 * If the hardware/firmware marked hugepage support disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
	if (vma_is_dax(vma))
		return in_pf;

	/*
	 * Special VMA and hugetlb VMA.
	 * Must be checked after dax since some dax mappings may have
	 * VM_MIXEDMAP set.
	 */
	if (vm_flags & VM_NO_KHUGEPAGED)
		return false;

	/*
	 * Check alignment for file vma and size for both file and anon vma.
	 *
	 * Skip the check for page fault. Huge fault does the check in fault
	 * handlers. And this check is not suitable for huge PUD fault.
	 */
	if (!in_pf &&
	    !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
		return false;

	/*
	 * Enabled via shmem mount options or sysfs settings.
	 * Must be done before hugepage flags check since shmem has its
	 * own flags.
	 */
	if (!in_pf && shmem_file(vma->vm_file))
		return shmem_huge_enabled(vma, !enforce_sysfs);

	/* Enforce sysfs THP requirements as necessary */
	if (enforce_sysfs &&
	    (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
					   !hugepage_flags_always())))
		return false;

	/* Only regular file is valid */
	if (!in_pf && file_thp_enabled(vma))
		return true;

	if (!vma_is_anonymous(vma))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	/*
	 * THPeligible bit of smaps should show 1 for proper VMAs even
	 * though anon_vma is not initialized yet.
	 *
	 * Allow page fault since anon_vma may be not initialized until
	 * the first page fault.
	 */
	if (!vma->anon_vma)
		return (smaps || in_pf);

	return true;
}
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	return true;
}
static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}
void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}
static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}
static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
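
/*
 * Illustrative note (not part of the original source): huge_zero_refcount
 * holds one extra reference on behalf of the shrinker, so a count of 1 means
 * no mm is using the huge zero page any more and shrink_huge_zero_page_scan()
 * may free it under memory pressure.
 */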
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr = __ATTR_RW(enabled);
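
/*
 * Illustrative note (not part of the original source): enabled_show() and
 * enabled_store() back the sysfs file
 * /sys/kernel/mm/transparent_hugepage/enabled, so the policy can be changed
 * at runtime from userspace, e.g.:
 *
 *	echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 */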
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr = __ATTR_RW(defrag);
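
/*
 * Illustrative note (not part of the original source): defrag_show() and
 * defrag_store() back /sys/kernel/mm/transparent_hugepage/defrag; the five
 * keywords accepted by defrag_store() map onto the DEFRAG_* flags consumed by
 * vma_thp_gfp_mask() below, e.g.:
 *
 *	echo defer+madvise > /sys/kernel/mm/transparent_hugepage/defrag
 */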
static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr = __ATTR_RW(use_zero_page);
static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
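
/*
 * Illustrative note (not part of the original source): hpage_pmd_size is a
 * read-only sysfs file reporting HPAGE_PMD_SIZE in bytes (e.g. 2097152 on
 * x86-64 with 4K base pages):
 *
 *	cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 */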
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}
static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		/*
		 * Hardware doesn't support hugepages, hence disable
		 * DAX PMD support.
		 */
		transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
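
/*
 * Illustrative note (not part of the original source): the __setup() hook
 * above parses the boot parameter, so the same policy can also be selected on
 * the kernel command line, e.g. "transparent_hugepage=madvise".
 */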
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif
void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
static inline bool is_transparent_hugepage(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return false;

	folio = page_folio(page);
	return is_huge_zero_page(&folio->page) ||
	       folio->_folio_dtor == TRANSHUGE_PAGE_DTOR;
}
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;

	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
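
/*
 * Illustrative note (not part of the original source): the padding logic in
 * __thp_get_unmapped_area() asks for len + PMD_SIZE and then advances the
 * returned address by (off - ret) & (PMD_SIZE - 1), which makes the chosen
 * virtual address congruent to the file offset modulo PMD_SIZE. A mapping
 * whose file offset is PMD-aligned therefore ends up PMD-aligned in the
 * address space as well, which is what allows huge mappings of page cache.
 */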
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			struct page *page, gfp_t gfp)
{
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	vm_fault_t ret = 0;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, gfp)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
		return VM_FAULT_FALLBACK;
	}
	cgroup_throttle_swaprate(page, gfp);

	pgtable = pte_alloc_one(vma->vm_mm);
	if (unlikely(!pgtable)) {
		ret = VM_FAULT_OOM;
		goto release;
	}

	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_none(*vmf->pmd))) {
		goto unlock_release;
	} else {
		pmd_t entry;

		ret = check_stable_address_space(vma->vm_mm);
		if (ret)
			goto unlock_release;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			spin_unlock(vmf->ptl);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret = handle_userfault(vmf, VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		lru_cache_add_inactive_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
	}

	return 0;
unlock_release:
	spin_unlock(vmf->ptl);
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	put_page(page);
	return ret;
}
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma)
{
	const bool vma_madvised = vma && (vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
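
/*
 * Illustrative note (not part of the original source): for example, with
 * defrag set to "defer+madvise", a VMA marked with MADV_HUGEPAGE gets
 * GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM (synchronous compaction), while
 * any other VMA gets GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM and only
 * kicks kcompactd.
 */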
/* Caller must hold page table lock. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
}
vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	gfp_t gfp;
	struct folio *folio;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;

	if (!transhuge_vma_suitable(vma, haddr))
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	khugepaged_enter_vma(vma, vma->vm_flags);

	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
			!mm_forbids_zeropage(vma->vm_mm) &&
			transparent_hugepage_use_zero_page()) {
		pgtable_t pgtable;
		struct page *zero_page;
		vm_fault_t ret;

		pgtable = pte_alloc_one(vma->vm_mm);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = mm_get_huge_zero_page(vma->vm_mm);
		if (unlikely(!zero_page)) {
			pte_free(vma->vm_mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		ret = 0;
		if (pmd_none(*vmf->pmd)) {
			ret = check_stable_address_space(vma->vm_mm);
			if (ret) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
			} else if (userfaultfd_missing(vma)) {
				spin_unlock(vmf->ptl);
				pte_free(vma->vm_mm, pgtable);
				ret = handle_userfault(vmf, VM_UFFD_MISSING);
				VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			} else {
				set_huge_zero_page(pgtable, vma->vm_mm, vma,
						   haddr, vmf->pmd, zero_page);
				update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
				spin_unlock(vmf->ptl);
			}
		} else {
			spin_unlock(vmf->ptl);
			pte_free(vma->vm_mm, pgtable);
		}
		return ret;
	}
	gfp = vma_thp_gfp_mask(vma);
	folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true);
	if (unlikely(!folio)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
}
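
/*
 * Illustrative note (not part of the original source): this path is reached
 * when a suitably aligned anonymous mapping takes its first fault, e.g. a
 * PMD-aligned region obtained with mmap(MAP_PRIVATE|MAP_ANONYMOUS) that is
 * then written to; read faults may instead install the huge zero page when
 * use_zero_page is enabled.
 */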
static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
		pgtable_t pgtable)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t entry;
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd)) {
		if (write) {
			if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
				goto out_unlock;
			}
			entry = pmd_mkyoung(*pmd);
			entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
			if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
				update_mmu_cache_pmd(vma, addr, pmd);
		}

		goto out_unlock;
	}

	entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pmd_mkdevmap(entry);
	if (write) {
		entry = pmd_mkyoung(pmd_mkdirty(entry));
		entry = maybe_pmd_mkwrite(entry, vma);
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		mm_inc_nr_ptes(mm);
		pgtable = NULL;
	}

	set_pmd_at(mm, addr, pmd, entry);
	update_mmu_cache_pmd(vma, addr, pmd);

out_unlock:
	spin_unlock(ptl);
	if (pgtable)
		pte_free(mm, pgtable);
}
/**
 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	pgtable_t pgtable = NULL;

	/*
	 * If we had pmd_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot);
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);
	return pud;
}
static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, pfn_t pfn, pgprot_t prot, bool write)
{
	struct mm_struct *mm = vma->vm_mm;
	pud_t entry;
	spinlock_t *ptl;

	ptl = pud_lock(mm, pud);
	if (!pud_none(*pud)) {
		if (write) {
			if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
				WARN_ON_ONCE(!is_huge_zero_pud(*pud));
				goto out_unlock;
			}
			entry = pud_mkyoung(*pud);
			entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
			if (pudp_set_access_flags(vma, addr, pud, entry, 1))
				update_mmu_cache_pud(vma, addr, pud);
		}
		goto out_unlock;
	}

	entry = pud_mkhuge(pfn_t_pud(pfn, prot));
	if (pfn_t_devmap(pfn))
		entry = pud_mkdevmap(entry);
	if (write) {
		entry = pud_mkyoung(pud_mkdirty(entry));
		entry = maybe_pud_mkwrite(entry, vma);
	}
	set_pud_at(mm, addr, pud, entry);
	update_mmu_cache_pud(vma, addr, pud);

out_unlock:
	spin_unlock(ptl);
}
/**
 * vmf_insert_pfn_pud_prot - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @pgprot: page protection to use
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
 * also consult the vmf_insert_mixed_prot() documentation when
 * @pgprot != @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write)
{
	unsigned long addr = vmf->address & PUD_MASK;
	struct vm_area_struct *vma = vmf->vma;

	/*
	 * If we had pud_special, we could avoid all these restrictions,
	 * but we need to be consistent with PTEs and architectures that
	 * can't support a 'special' bit.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
			!pfn_t_devmap(pfn));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return VM_FAULT_SIGBUS;

	track_pfn_insert(vma, &pgprot, pfn);

	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot);
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
		      pmd_t *pmd, bool write)
{
	pmd_t _pmd;

	_pmd = pmd_mkyoung(*pmd);
	if (write)
		_pmd = pmd_mkdirty(_pmd);
	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
				  pmd, _pmd, write))
		update_mmu_cache_pmd(vma, addr, pmd);
}
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pmd_pfn(*pmd);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		return NULL;

	if (pmd_present(*pmd) && pmd_devmap(*pmd))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);
	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable = NULL;
	int ret = -ENOMEM;

	/* Skip if can be re-fill on fault */
	if (!vma_is_anonymous(dst_vma))
		return 0;

	pgtable = pte_alloc_one(dst_mm);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (unlikely(is_swap_pmd(pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(pmd);

		VM_BUG_ON(!is_pmd_migration_entry(pmd));
		if (!is_readable_migration_entry(entry)) {
			entry = make_readable_migration_entry(
							swp_offset(entry));
			pmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*src_pmd))
				pmd = pmd_swp_mksoft_dirty(pmd);
			if (pmd_swp_uffd_wp(*src_pmd))
				pmd = pmd_swp_mkuffd_wp(pmd);
			set_pmd_at(src_mm, addr, src_pmd, pmd);
		}
		add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		mm_inc_nr_ptes(dst_mm);
		pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
		if (!userfaultfd_wp(dst_vma))
			pmd = pmd_swp_clear_uffd_wp(pmd);
		set_pmd_at(dst_mm, addr, dst_pmd, pmd);
		ret = 0;
		goto out_unlock;
	}
#endif

	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		mm_get_huge_zero_page(dst_mm);
		goto out_zero_page;
	}

	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);

	get_page(src_page);
	if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
		/* Page maybe pinned: split and retry the fault on PTEs. */
		put_page(src_page);
		pte_free(dst_mm, pgtable);
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		__split_huge_pmd(src_vma, src_pmd, addr, false, NULL);
		return -EAGAIN;
	}
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	if (!userfaultfd_wp(dst_vma))
		pmd = pmd_clear_uffd_wp(pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
		      pud_t *pud, bool write)
{
	pud_t _pud;

	_pud = pud_mkyoung(*pud);
	if (write)
		_pud = pud_mkdirty(_pud);
	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
				  pud, _pud, write))
		update_mmu_cache_pud(vma, addr, pud);
}
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	unsigned long pfn = pud_pfn(*pud);
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pud_lockptr(mm, pud));

	if (flags & FOLL_WRITE && !pud_write(*pud))
		return NULL;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

	if (pud_present(*pud) && pud_devmap(*pud))
		/* pass */;
	else
		return NULL;

	if (flags & FOLL_TOUCH)
		touch_pud(vma, addr, pud, flags & FOLL_WRITE);

	/*
	 * device mapped pages can only be returned if the
	 * caller will manage the page reference count.
	 *
	 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
	 */
	if (!(flags & (FOLL_GET | FOLL_PIN)))
		return ERR_PTR(-EEXIST);

	pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
	*pgmap = get_dev_pagemap(pfn, *pgmap);
	if (!*pgmap)
		return ERR_PTR(-EFAULT);
	page = pfn_to_page(pfn);

	ret = try_grab_page(page, flags);
	if (ret)
		page = ERR_PTR(ret);

	return page;
}
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	pud_t pud;
	int ret;

	dst_ptl = pud_lock(dst_mm, dst_pud);
	src_ptl = pud_lockptr(src_mm, src_pud);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pud = *src_pud;
	if (unlikely(!pud_trans_huge(pud) && !pud_devmap(pud)))
		goto out_unlock;

	/*
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
	 */
	if (is_huge_zero_pud(pud)) {
		/* No huge zero pud yet */
	}

	/*
	 * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
	 * and split if duplicating fails.
	 */
	pudp_set_wrprotect(src_mm, addr, src_pud);
	pud = pud_mkold(pud_wrprotect(pud));
	set_pud_at(dst_mm, addr, dst_pud, pud);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
	return ret;
}
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud);
	if (unlikely(!pud_same(*vmf->pud, orig_pud)))
		goto unlock;

	touch_pud(vmf->vma, vmf->address, vmf->pud, write);
unlock:
	spin_unlock(vmf->ptl);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
void huge_pmd_set_accessed(struct vm_fault *vmf)
{
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(*vmf->pmd, vmf->orig_pmd)))
		goto unlock;

	touch_pmd(vmf->vma, vmf->address, vmf->pmd, write);
unlock:
	spin_unlock(vmf->ptl);
}
vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	pmd_t orig_pmd = vmf->orig_pmd;

	vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);

	if (is_huge_zero_pmd(orig_pmd))
		goto fallback;

	spin_lock(vmf->ptl);

	if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
		spin_unlock(vmf->ptl);
		return 0;
	}

	page = pmd_page(orig_pmd);
	folio = page_folio(page);
	VM_BUG_ON_PAGE(!PageHead(page), page);

	/* Early check when only holding the PT lock. */
	if (PageAnonExclusive(page))
		goto reuse;

	if (!folio_trylock(folio)) {
		folio_get(folio);
		spin_unlock(vmf->ptl);
		folio_lock(folio);
		spin_lock(vmf->ptl);
		if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
			spin_unlock(vmf->ptl);
			folio_unlock(folio);
			folio_put(folio);
			return 0;
		}
		folio_put(folio);
	}

	/* Recheck after temporarily dropping the PT lock. */
	if (PageAnonExclusive(page)) {
		folio_unlock(folio);
		goto reuse;
	}

	/*
	 * See do_wp_page(): we can only reuse the folio exclusively if
	 * there are no additional references. Note that we always drain
	 * the LRU pagevecs immediately after adding a THP.
	 */
	if (folio_ref_count(folio) >
			1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
		goto unlock_fallback;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	if (folio_ref_count(folio) == 1) {
		pmd_t entry;

		page_move_anon_rmap(page, vma);
		folio_unlock(folio);
reuse:
		if (unlikely(unshare)) {
			spin_unlock(vmf->ptl);
			return 0;
		}
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1))
			update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
		spin_unlock(vmf->ptl);
		return 0;
	}

unlock_fallback:
	folio_unlock(folio);
	spin_unlock(vmf->ptl);
fallback:
	__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
	return VM_FAULT_FALLBACK;
}
static inline bool can_change_pmd_writable(struct vm_area_struct *vma,
					   unsigned long addr, pmd_t pmd)
{
	struct page *page;

	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable (NUMA hinting). */
	if (pmd_protnone(pmd))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_huge_pmd_wp(vma, pmd))
		return false;

	if (!(vma->vm_flags & VM_SHARED)) {
		/* See can_change_pte_writable(). */
		page = vm_normal_page_pmd(vma, addr, pmd);
		return page && PageAnon(page) && PageAnonExclusive(page);
	}

	/* See can_change_pte_writable(). */
	return pmd_dirty(pmd);
}
/* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
					struct vm_area_struct *vma,
					unsigned int flags)
{
	/* If the pmd is writable, we can write to the page. */
	if (pmd_write(pmd))
		return true;

	/* Maybe FOLL_FORCE is set to override it? */
	if (!(flags & FOLL_FORCE))
		return false;

	/* But FOLL_FORCE has no effect on shared mappings */
	if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
		return false;

	/* ... or read-only private ones */
	if (!(vma->vm_flags & VM_MAYWRITE))
		return false;

	/* ... or already writable ones that just need to take a write fault */
	if (vma->vm_flags & VM_WRITE)
		return false;

	/*
	 * See can_change_pte_writable(): we broke COW and could map the page
	 * writable if we have an exclusive anonymous page ...
	 */
	if (!page || !PageAnon(page) || !PageAnonExclusive(page))
		return false;

	/* ... and a write-fault isn't required for other reasons. */
	if (vma_soft_dirty_enabled(vma) && !pmd_soft_dirty(pmd))
		return false;
	return !userfaultfd_huge_pmd_wp(vma, pmd);
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int ret;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);

	if ((flags & FOLL_WRITE) &&
	    !can_follow_write_pmd(*pmd, page, vma, flags))
		return NULL;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
		return NULL;

	if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
		return ERR_PTR(-EMLINK);

	VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
			!PageAnonExclusive(page), page);

	ret = try_grab_page(page, flags);
	if (ret)
		return ERR_PTR(ret);

	if (flags & FOLL_TOUCH)
		touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);

	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);

	return page;
}
/* NUMA hinting page fault entry point for trans huge pmds */
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	pmd_t oldpmd = vmf->orig_pmd;
	pmd_t pmd;
	struct page *page;
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	int page_nid = NUMA_NO_NODE;
	int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
	bool migrated = false, writable = false;
	int flags = 0;

	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
	if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
		spin_unlock(vmf->ptl);
		goto out;
	}

	pmd = pmd_modify(oldpmd, vma->vm_page_prot);

	/*
	 * Detect now whether the PMD could be writable; this information
	 * is only valid while holding the PT lock.
	 */
	writable = pmd_write(pmd);
	if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
	    can_change_pmd_writable(vma, vmf->address, pmd))
		writable = true;

	page = vm_normal_page_pmd(vma, haddr, pmd);
	if (!page)
		goto out_map;

	/* See similar comment in do_numa_page for explanation */
	if (!writable)
		flags |= TNF_NO_GROUP;

	page_nid = page_to_nid(page);
	/*
	 * For memory tiering mode, cpupid of slow memory page is used
	 * to record page access time. So use default value.
	 */
	if (node_is_toptier(page_nid))
		last_cpupid = page_cpupid_last(page);
	target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
				       &flags);

	if (target_nid == NUMA_NO_NODE) {
		put_page(page);
		goto out_map;
	}

	spin_unlock(vmf->ptl);
	writable = false;

	migrated = migrate_misplaced_page(page, vma, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	} else {
		flags |= TNF_MIGRATE_FAIL;
		vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
		if (unlikely(!pmd_same(oldpmd, *vmf->pmd))) {
			spin_unlock(vmf->ptl);
			goto out;
		}
		goto out_map;
	}

out:
	if (page_nid != NUMA_NO_NODE)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR,
				flags);

	return 0;

out_map:
	/* Restore the PMD */
	pmd = pmd_modify(oldpmd, vma->vm_page_prot);
	pmd = pmd_mkyoung(pmd);
	if (writable)
		pmd = pmd_mkwrite(pmd);
	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd);
	update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
	spin_unlock(vmf->ptl);
	goto out;
}
/*
 * Return true if we do MADV_FREE successfully on entire pmd page.
 * Otherwise, return false.
 */
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next)
{
	spinlock_t *ptl;
	pmd_t orig_pmd;
	struct folio *folio;
	struct mm_struct *mm = tlb->mm;
	bool ret = false;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		goto out_unlocked;

	orig_pmd = *pmd;
	if (is_huge_zero_pmd(orig_pmd))
		goto out;

	if (unlikely(!pmd_present(orig_pmd))) {
		VM_BUG_ON(thp_migration_supported() &&
				  !is_pmd_migration_entry(orig_pmd));
		goto out;
	}

	folio = pfn_folio(pmd_pfn(orig_pmd));
	/*
	 * If other processes are mapping this folio, we couldn't discard
	 * the folio unless they all do MADV_FREE so let's skip the folio.
	 */
	if (folio_mapcount(folio) != 1)
		goto out;

	if (!folio_trylock(folio))
		goto out;

	/*
	 * If user want to discard part-pages of THP, split it so MADV_FREE
	 * will deactivate only them.
	 */
	if (next - addr != HPAGE_PMD_SIZE) {
		folio_get(folio);
		spin_unlock(ptl);
		split_folio(folio);
		folio_unlock(folio);
		folio_put(folio);
		goto out_unlocked;
	}

	if (folio_test_dirty(folio))
		folio_clear_dirty(folio);
	folio_unlock(folio);

	if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
		pmdp_invalidate(vma, addr, pmd);
		orig_pmd = pmd_mkold(orig_pmd);
		orig_pmd = pmd_mkclean(orig_pmd);

		set_pmd_at(mm, addr, pmd, orig_pmd);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	}

	folio_mark_lazyfree(folio);
	ret = true;
out:
	spin_unlock(ptl);
out_unlocked:
	return ret;
}
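
/*
 * Illustrative note (not part of the original source): this is reached from
 * madvise(MADV_FREE) when the advised range covers a whole PMD-mapped THP;
 * ranges that only cover part of the THP fall back to splitting the folio so
 * that only the advised subpages are made lazily freeable.
 */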
static inline void zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t pgtable;

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pte_free(mm, pgtable);
	mm_dec_nr_ptes(mm);
}
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	pmd_t orig_pmd;
	spinlock_t *ptl;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;
	/*
	 * For architectures like ppc64 we look at deposited pgtable
	 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related
	 * operations.
	 */
	orig_pmd = pmdp_huge_get_and_clear_full(vma, addr, pmd,
						tlb->fullmm);
	tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
	if (vma_is_special_huge(vma)) {
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else if (is_huge_zero_pmd(orig_pmd)) {
		zap_deposited_table(tlb->mm, pmd);
		spin_unlock(ptl);
	} else {
		struct page *page = NULL;
		int flush_needed = 1;

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page, vma, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
		} else if (thp_migration_supported()) {
			swp_entry_t entry;

			VM_BUG_ON(!is_pmd_migration_entry(orig_pmd));
			entry = pmd_to_swp_entry(orig_pmd);
			page = pfn_swap_entry_to_page(entry);
			flush_needed = 0;
		} else
			WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");

		if (PageAnon(page)) {
			zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
		} else {
			if (arch_needs_pgtable_deposit())
				zap_deposited_table(tlb->mm, pmd);
			add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
		}

		spin_unlock(ptl);
		if (flush_needed)
			tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
	}
	return 1;
}
#ifndef pmd_move_must_withdraw
static inline int pmd_move_must_withdraw(spinlock_t *new_pmd_ptl,
					 spinlock_t *old_pmd_ptl,
					 struct vm_area_struct *vma)
{
	/*
	 * With split pmd lock we also need to move preallocated
	 * PTE page table if new_pmd is on different PMD page table.
	 *
	 * We also don't deposit and withdraw tables for file pages.
	 */
	return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
}
#endif
static pmd_t move_soft_dirty_pmd(pmd_t pmd)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (unlikely(is_pmd_migration_entry(pmd)))
		pmd = pmd_swp_mksoft_dirty(pmd);
	else if (pmd_present(pmd))
		pmd = pmd_mksoft_dirty(pmd);
#endif
	return pmd;
}
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	pmd_t pmd;
	struct mm_struct *mm = vma->vm_mm;
	bool force_flush = false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have release it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		return false;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
	if (old_ptl) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
		if (pmd_present(pmd))
			force_flush = true;
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		pmd = move_soft_dirty_pmd(pmd);
		set_pmd_at(mm, new_addr, new_pmd, pmd);
		if (force_flush)
			flush_pmd_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
		return true;
	}
	return false;
}
/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *      or if prot_numa but THP migration is not supported
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
 */
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t oldpmd, entry;
	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
	int ret = 1;

	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);

	if (prot_numa && !thp_migration_supported())
		return 1;

	ptl = __pmd_trans_huge_lock(pmd, vma);
	if (!ptl)
		return 0;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
	if (is_swap_pmd(*pmd)) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);
		struct page *page = pfn_swap_entry_to_page(entry);
		pmd_t newpmd;

		VM_BUG_ON(!is_pmd_migration_entry(*pmd));
		if (is_writable_migration_entry(entry)) {
			/*
			 * A protection check is difficult so
			 * just be safe and disable write
			 */
			if (PageAnon(page))
				entry = make_readable_exclusive_migration_entry(swp_offset(entry));
			else
				entry = make_readable_migration_entry(swp_offset(entry));
			newpmd = swp_entry_to_pmd(entry);
			if (pmd_swp_soft_dirty(*pmd))
				newpmd = pmd_swp_mksoft_dirty(newpmd);
			if (pmd_swp_uffd_wp(*pmd))
				newpmd = pmd_swp_mkuffd_wp(newpmd);
			set_pmd_at(mm, addr, pmd, newpmd);
		}
		goto unlock;
	}
#endif

	if (prot_numa) {
		struct page *page;
		bool toptier;
		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
		 * local/remote hits to the zero page are not interesting.
		 */
		if (is_huge_zero_pmd(*pmd))
			goto unlock;

		if (pmd_protnone(*pmd))
			goto unlock;

		page = pmd_page(*pmd);
		toptier = node_is_toptier(page_to_nid(page));
		/*
		 * Skip scanning top tier node if normal numa
		 * balancing is disabled
		 */
		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
		    toptier)
			goto unlock;

		if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
		    !toptier)
			xchg_page_access_time(page, jiffies_to_msecs(jiffies));
	}
	/*
	 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
	 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
	 * which is also under mmap_read_lock(mm):
	 *
	 *	CPU0:				CPU1:
	 *				change_huge_pmd(prot_numa=1)
	 *				 pmdp_huge_get_and_clear_notify()
	 * madvise_dontneed()
	 *  zap_pmd_range()
	 *   pmd_trans_huge(*pmd) == 0 (without ptl)
	 *   // skip the pmd
	 *				 set_pmd_at();
	 *				 // pmd is re-established
	 *
	 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
	 * which may break userspace.
	 *
	 * pmdp_invalidate_ad() is required to make sure we don't miss
	 * dirty/young flags set by hardware.
	 */
	oldpmd = pmdp_invalidate_ad(vma, addr, pmd);

	entry = pmd_modify(oldpmd, newprot);
	if (uffd_wp)
		entry = pmd_mkuffd_wp(entry);
	else if (uffd_wp_resolve)
		/*
		 * Leave the write bit to be handled by PF interrupt
		 * handler, then things like COW could be properly
		 * handled.
		 */
		entry = pmd_clear_uffd_wp(entry);

	/* See change_pte_range(). */
	if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pmd_write(entry) &&
	    can_change_pmd_writable(vma, addr, entry))
		entry = pmd_mkwrite(entry);

	ret = HPAGE_PMD_NR;
	set_pmd_at(mm, addr, pmd, entry);

	if (huge_pmd_needs_flush(oldpmd, entry))
		tlb_flush_pmd_range(tlb, addr, HPAGE_PMD_SIZE);
unlock:
	spin_unlock(ptl);
	return ret;
}
/*
 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
{
	spinlock_t *ptl;
	ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
			pmd_devmap(*pmd)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
/*
 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
 *
 * Note that if it returns page table lock pointer, this routine returns without
 * unlocking page table lock. So callers must unlock it.
 */
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma)
{
	spinlock_t *ptl;

	ptl = pud_lock(vma->vm_mm, pud);
	if (likely(pud_trans_huge(*pud) || pud_devmap(*pud)))
		return ptl;
	spin_unlock(ptl);
	return NULL;
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pud_t *pud, unsigned long addr)
{
	spinlock_t *ptl;

	ptl = __pud_trans_huge_lock(pud, vma);
	if (!ptl)
		return 0;

	pudp_huge_get_and_clear_full(tlb->mm, addr, pud, tlb->fullmm);
	tlb_remove_pud_tlb_entry(tlb, pud, addr);
	if (vma_is_special_huge(vma)) {
		spin_unlock(ptl);
		/* No zero page support yet */
	} else {
		/* No support for anonymous PUD pages yet */
		BUG();
	}
	return 1;
}
static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud,
		unsigned long haddr)
{
	VM_BUG_ON(haddr & ~HPAGE_PUD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma);
	VM_BUG_ON(!pud_trans_huge(*pud) && !pud_devmap(*pud));

	count_vm_event(THP_SPLIT_PUD);

	pudp_huge_clear_flush_notify(vma, haddr, pud);
}
void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address & HPAGE_PUD_MASK,
				(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pud_lock(vma->vm_mm, pud);
	if (unlikely(!pud_trans_huge(*pud) && !pud_devmap(*pud)))
		goto out;
	__split_huge_pud_locked(vma, pud, range.start);

out:
	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pudp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	/*
	 * Leave pmd empty until pte is filled note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	pmdp_huge_clear_flush(vma, haddr, pmd);

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}
2078 static void __split_huge_pmd_locked(struct vm_area_struct
*vma
, pmd_t
*pmd
,
2079 unsigned long haddr
, bool freeze
)
2081 struct mm_struct
*mm
= vma
->vm_mm
;
2084 pmd_t old_pmd
, _pmd
;
2085 bool young
, write
, soft_dirty
, pmd_migration
= false, uffd_wp
= false;
2086 bool anon_exclusive
= false, dirty
= false;
2090 VM_BUG_ON(haddr
& ~HPAGE_PMD_MASK
);
2091 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
2092 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PMD_SIZE
, vma
);
2093 VM_BUG_ON(!is_pmd_migration_entry(*pmd
) && !pmd_trans_huge(*pmd
)
2094 && !pmd_devmap(*pmd
));
2096 count_vm_event(THP_SPLIT_PMD
);
2098 if (!vma_is_anonymous(vma
)) {
2099 old_pmd
= pmdp_huge_clear_flush_notify(vma
, haddr
, pmd
);
2101 * We are going to unmap this huge page. So
2102 * just go ahead and zap it
2104 if (arch_needs_pgtable_deposit())
2105 zap_deposited_table(mm
, pmd
);
2106 if (vma_is_special_huge(vma
))
2108 if (unlikely(is_pmd_migration_entry(old_pmd
))) {
2111 entry
= pmd_to_swp_entry(old_pmd
);
2112 page
= pfn_swap_entry_to_page(entry
);
2114 page
= pmd_page(old_pmd
);
2115 if (!PageDirty(page
) && pmd_dirty(old_pmd
))
2116 set_page_dirty(page
);
2117 if (!PageReferenced(page
) && pmd_young(old_pmd
))
2118 SetPageReferenced(page
);
2119 page_remove_rmap(page
, vma
, true);
2122 add_mm_counter(mm
, mm_counter_file(page
), -HPAGE_PMD_NR
);

	if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate secondary mmu by calling
		 * mmu_notifier_invalidate_range() see comments below inside
		 * __split_huge_pmd() ?
		 *
		 * We are going from a zero huge page write protected to zero
		 * small page also write protected so it does not seem useful
		 * to invalidate secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPU doesn't like that.
	 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
	 * 383 on page 105. Intel should be safe but also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case here).
	 * But it is generally safer to never allow small and huge TLB entries
	 * for the same virtual address to be loaded simultaneously. So instead
	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
	 * current pmd notpresent (atomically because here the pmd_trans_huge
	 * must remain set at all times on the pmd until the split is complete
	 * for this pmd), then we flush the SMP TLB and finally we write the
	 * non-huge version of the pmd entry with pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (unlikely(pmd_migration)) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_swap_entry_to_page(entry);
		write = is_writable_migration_entry(entry);
		if (PageAnon(page))
			anon_exclusive = is_readable_exclusive_migration_entry(entry);
		young = is_migration_entry_young(entry);
		dirty = is_migration_entry_dirty(entry);
		soft_dirty = pmd_swp_soft_dirty(old_pmd);
		uffd_wp = pmd_swp_uffd_wp(old_pmd);
	} else {
		page = pmd_page(old_pmd);
		if (pmd_dirty(old_pmd)) {
			dirty = true;
			SetPageDirty(page);
		}
		write = pmd_write(old_pmd);
		young = pmd_young(old_pmd);
		soft_dirty = pmd_soft_dirty(old_pmd);
		uffd_wp = pmd_uffd_wp(old_pmd);

		VM_BUG_ON_PAGE(!page_count(page), page);

		/*
		 * Without "freeze", we'll simply split the PMD, propagating the
		 * PageAnonExclusive() flag for each PTE by setting it for
		 * each subpage -- no need to (temporarily) clear.
		 *
		 * With "freeze" we want to replace mapped pages by
		 * migration entries right away. This is only possible if we
		 * managed to clear PageAnonExclusive() -- see
		 * set_pmd_migration_entry().
		 *
		 * In case we cannot clear PageAnonExclusive(), split the PMD
		 * only and let try_to_migrate_one() fail later.
		 *
		 * See page_try_share_anon_rmap(): invalidate PMD first.
		 */
		anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
		if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
			freeze = false;
		if (!freeze)
			page_ref_add(page, HPAGE_PMD_NR - 1);
	}

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;

			if (write)
				swp_entry = make_writable_migration_entry(
							page_to_pfn(page + i));
			else if (anon_exclusive)
				swp_entry = make_readable_exclusive_migration_entry(
							page_to_pfn(page + i));
			else
				swp_entry = make_readable_migration_entry(
							page_to_pfn(page + i));
			if (young)
				swp_entry = make_migration_entry_young(swp_entry);
			if (dirty)
				swp_entry = make_migration_entry_dirty(swp_entry);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_swp_mkuffd_wp(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (anon_exclusive)
				SetPageAnonExclusive(page + i);
			if (!young)
				entry = pte_mkold(entry);
			/* NOTE: this may set soft-dirty too on some archs */
			if (dirty)
				entry = pte_mkdirty(entry);
			/*
			 * NOTE: this needs to happen after pte_mkdirty,
			 * because some archs (sparc64, loongarch) could
			 * set hw write bit when mkdirty.
			 */
			if (!write)
				entry = pte_wrprotect(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
			if (uffd_wp)
				entry = pte_mkuffd_wp(entry);
			page_add_anon_rmap(page + i, vma, addr, false);
		}
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		pte_unmap(pte);
	}

	if (!pmd_migration)
		page_remove_rmap(page, vma, true);
	if (freeze)
		put_page(page);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
}

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pmd_lock(vma->vm_mm, pmd);

	/*
	 * If caller asks to setup a migration entry, we need a folio to check
	 * pmd against. Otherwise we can end up replacing wrong folio.
	 */
	VM_BUG_ON(freeze && !folio);
	VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));

	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
	    is_pmd_migration_entry(*pmd)) {
		/*
		 * It's safe to call pmd_page when folio is set because it's
		 * guaranteed that pmd is present.
		 */
		if (folio && folio != page_folio(pmd_page(*pmd)))
			goto out;
		__split_huge_pmd_locked(vma, pmd, range.start, freeze);
	}

out:
	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback.
	 * There are 3 cases to consider inside __split_huge_pmd_locked():
	 *  1) pmdp_huge_clear_flush_notify() call invalidate_range() obvious
	 *  2) __split_huge_zero_page_pmd() read only zero page and any write
	 *    fault will trigger a flush_notify before pointing to a new page
	 *    (it is fine if the secondary mmu keeps pointing to the old zero
	 *    page in the meantime)
	 *  3) Split a huge pmd into pte pointing to the same page. No need
	 *     to invalidate secondary tlb entry they are all still valid.
	 *     any further changes to individual pte will notify. So no need
	 *     to call mmu_notifier->invalidate_range()
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio)
{
	pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);

	if (!pmd)
		return;

	__split_huge_pmd(vma, pmd, address, freeze, folio);
}

static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * If the new address isn't hpage aligned and it could previously
	 * contain an hugepage: check if we need to split an huge pmd.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}
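
/*
 * vma_adjust_trans_huge() is called when a VMA's boundaries are about to
 * change, so that a huge PMD straddling the new start, the new end, or the
 * adjusted start of the next VMA is split back to PTEs first.
 */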

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/* Check if we need to split start first. */
	split_huge_pmd_if_needed(vma, start);

	/* Check if we need to split end next. */
	split_huge_pmd_if_needed(vma, end);

	/*
	 * If we're also updating the next vma vm_start,
	 * check if we need to split it.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
		unsigned long nstart = next->vm_start;
		nstart += adjust_next;
		split_huge_pmd_if_needed(next, nstart);
	}
}
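
/*
 * unmap_folio() and remap_page() below bracket the huge page split:
 * anon mappings are replaced by migration entries before the split and
 * restored by remap_page() afterwards, while file mappings are simply
 * unmapped and faulted back in on demand.
 */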

static void unmap_folio(struct folio *folio)
{
	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
		TTU_SYNC;

	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	/*
	 * Anon pages need migration entries to preserve them, but file
	 * pages can simply be left unmapped, then faulted back on demand.
	 * If that is ever changed (perhaps for mlock), update remap_page().
	 */
	if (folio_test_anon(folio))
		try_to_migrate(folio, ttu_flags);
	else
		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
}

static void remap_page(struct folio *folio, unsigned long nr)
{
	int i = 0;

	/* If unmap_folio() uses try_to_migrate() on file, remove this check */
	if (!folio_test_anon(folio))
		return;
	for (;;) {
		remove_migration_ptes(folio, folio, true);
		i += folio_nr_pages(folio);
		if (i >= nr)
			break;
		folio = folio_next(folio);
	}
}
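
/*
 * lru_add_page_tail() places a freshly split tail page either on the
 * caller's private list (page reclaim passes one in) or on the LRU right
 * after the head page, which is still frozen at this point.
 */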

static void lru_add_page_tail(struct page *head, struct page *tail,
		struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(head), head);
	VM_BUG_ON_PAGE(PageCompound(tail), head);
	VM_BUG_ON_PAGE(PageLRU(tail), head);
	lockdep_assert_held(&lruvec->lru_lock);

	if (list) {
		/* page reclaim is reclaiming a huge page */
		VM_WARN_ON(PageLRU(head));
		get_page(tail);
		list_add_tail(&tail->lru, list);
	} else {
		/* head is still on lru (and we have it frozen) */
		VM_WARN_ON(!PageLRU(head));
		if (PageUnevictable(tail))
			tail->mlock_count = 0;
		else
			list_add_tail(&tail->lru, &head->lru);
		SetPageLRU(tail);
	}
}

static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After successful get_page_unless_zero() might follow flags change,
	 * for example lock_page() which sets PG_waiters.
	 *
	 * Note that for mapped sub-pages of an anonymous THP,
	 * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
	 * the migration entry instead from where remap_page() will restore it.
	 * We can still have PG_anon_exclusive set on effectively unmapped and
	 * unreferenced sub-pages of an anonymous THP: we can simply drop
	 * PG_anon_exclusive (-> PG_mappedtodisk) for these here.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
#ifdef CONFIG_ARCH_USES_PG_ARCH_X
			 (1L << PG_arch_2) |
			 (1L << PG_arch_3) |
#endif
			 (1L << PG_dirty) |
			 LRU_GEN_MASK | LRU_REFS_MASK));

	/* ->mapping in first and second tail page is replaced by other uses */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/*
	 * page->private should not be set in tail pages with the exception
	 * of swap cache pages that store the swp_entry_t in tail pages.
	 * Fix up and warn once if private is unexpectedly set.
	 *
	 * What of 32-bit systems, on which folio->_pincount overlays
	 * head[1].private? No problem: THP_SWAP is not enabled on 32-bit, and
	 * pincount must be 0 for folio_ref_freeze() to have succeeded.
	 */
	if (!folio_test_swapcache(page_folio(head))) {
		VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
		page_tail->private = 0;
	}

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After successful get_page_unless_zero() might follow put_page()
	 * which needs correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(head, page_tail, lruvec, list);
}
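
/*
 * __split_huge_page() does the split proper once split_huge_page_to_list()
 * has unmapped the folio, frozen its refcount and disabled interrupts; tail
 * pages are initialised from the highest index down to 1, and the head page
 * is handled last.
 */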

static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end)
{
	struct folio *folio = page_folio(page);
	struct page *head = &folio->page;
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	unsigned int nr = thp_nr_pages(head);
	int i;

	/* complete memcg works before add pages to LRU */
	split_page_memcg(head, nr);

	if (PageAnon(head) && PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		offset = swp_offset(entry);
		swap_cache = swap_address_space(entry);
		xa_lock(&swap_cache->i_pages);
	}

	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
	lruvec = folio_lruvec_lock(folio);

	ClearPageHasHWPoisoned(head);

	for (i = nr - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond EOF: drop them from page cache */
		if (head[i].index >= end) {
			struct folio *tail = page_folio(head + i);

			if (shmem_mapping(head->mapping))
				shmem_uncharge(head->mapping->host, 1);
			else if (folio_test_clear_dirty(tail))
				folio_account_cleaned(tail,
					inode_to_wb(folio->mapping->host));
			__filemap_remove_folio(tail, NULL);
			folio_put(tail);
		} else if (!PageAnon(page)) {
			__xa_store(&head->mapping->i_pages, head[i].index,
					head + i, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
		}
	}

	ClearPageCompound(head);
	unlock_page_lruvec(lruvec);
	/* Caller disabled irqs, so they are still disabled here */

	split_page_owner(head, nr);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to swap cache */
		if (PageSwapCache(head)) {
			page_ref_add(head, 2);
			xa_unlock(&swap_cache->i_pages);
		} else {
			page_ref_inc(head);
		}
	} else {
		/* Additional pin to page cache */
		page_ref_add(head, 2);
		xa_unlock(&head->mapping->i_pages);
	}
	local_irq_enable();

	remap_page(folio, nr);

	if (PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		split_swap_cluster(entry);
	}

	for (i = 0; i < nr; i++) {
		struct page *subpage = head + i;

		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		free_page_and_swap_cache(subpage);
	}
}
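
/*
 * The check below compares the folio's mapcount against its refcount minus
 * the known extra pins: one for the caller (see the comment above
 * split_huge_page_to_list()) plus one per subpage held by the page cache or
 * swap cache. Any remaining reference means someone else holds a pin and
 * the split must not proceed.
 */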

/* Racy check whether the huge page can be split */
bool can_split_folio(struct folio *folio, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from page cache */
	if (folio_test_anon(folio))
		extra_pins = folio_test_swapcache(folio) ?
				folio_nr_pages(folio) : 0;
	else
		extra_pins = folio_nr_pages(folio);
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins - 1;
}

/*
 * This function splits huge page into normal pages. @page can point to any
 * subpage of huge page to split. Split doesn't change the position of @page.
 *
 * The caller must hold a pin on the @page, otherwise split fails with -EBUSY.
 * The huge page must be locked.
 *
 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on from
 * the hugepage.
 *
 * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
 * they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct folio *folio = page_folio(page);
	struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int extra_pins, ret;
	pgoff_t end;
	bool is_hzp;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);

	is_hzp = is_huge_zero_page(&folio->page);
	VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
	if (is_hzp)
		return -EBUSY;

	if (folio_test_writeback(folio))
		return -EBUSY;

	if (folio_test_anon(folio)) {
		/*
		 * The caller does not necessarily hold an mmap_lock that would
		 * prevent the anon_vma from disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to folio_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = folio_get_anon_vma(folio);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		gfp_t gfp;

		mapping = folio->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		gfp = current_gfp_context(mapping_gfp_mask(mapping) &
							GFP_RECLAIM_MASK);

		if (folio_test_private(folio) &&
				!filemap_release_folio(folio, gfp)) {
			ret = -EBUSY;
			goto out;
		}

		xas_split_alloc(&xas, folio, folio_order(folio), gfp);
		if (xas_error(&xas)) {
			ret = xas_error(&xas);
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 *__split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * folio lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (shmem_mapping(mapping))
			end = shmem_fallocend(mapping->host, end);
	}

	/*
	 * Racy check if we can split the page, before unmap_folio() will
	 * split PMDs
	 */
	if (!can_split_folio(folio, &extra_pins)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	unmap_folio(folio);

	/* block interrupt reentry in xa_lock and spinlock */
	local_irq_disable();
	if (mapping) {
		/*
		 * Check if the folio is present in page cache.
		 * We assume all tail are present too, if folio is there.
		 */
		xas_lock(&xas);
		xas_reset(&xas);
		if (xas_load(&xas) != folio)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&ds_queue->split_queue_lock);
	if (folio_ref_freeze(folio, 1 + extra_pins)) {
		if (!list_empty(&folio->_deferred_list)) {
			ds_queue->split_queue_len--;
			list_del(&folio->_deferred_list);
		}
		spin_unlock(&ds_queue->split_queue_lock);
		if (mapping) {
			int nr = folio_nr_pages(folio);

			xas_split(&xas, folio, folio_order(folio));
			if (folio_test_swapbacked(folio)) {
				__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
							-nr);
			} else {
				__lruvec_stat_mod_folio(folio, NR_FILE_THPS,
							-nr);
				filemap_nr_thps_dec(mapping);
			}
		}

		__split_huge_page(page, list, end);
		ret = 0;
	} else {
		spin_unlock(&ds_queue->split_queue_lock);
fail:
		if (mapping)
			xas_unlock(&xas);
		local_irq_enable();
		remap_page(folio, folio_nr_pages(folio));
		ret = -EAGAIN;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	xas_destroy(&xas);
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}

void free_transhuge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		ds_queue->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	free_compound_page(page);
}
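
/*
 * deferred_split_huge_page() queues a partially unmapped THP on the
 * per-node (or per-memcg) deferred split queue; the shrinker below walks
 * that queue and splits the queued pages when memory is tight.
 */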

void deferred_split_huge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
#endif
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	/*
	 * The try_to_unmap() in page reclaim path might reach here too,
	 * this may cause a race condition to corrupt deferred split queue.
	 * And, if page reclaim is already handling the same page, it is
	 * unnecessary to handle it again in shrinker.
	 *
	 * Check PageSwapCache to determine if the page is being
	 * handled by page reclaim since THP swap would add the page into
	 * swap cache before calling try_to_unmap().
	 */
	if (PageSwapCache(page))
		return;

	if (!list_empty(page_deferred_list(page)))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			set_shrinker_bit(memcg, page_to_nid(page),
					 deferred_split_shrinker.id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}
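
/*
 * deferred_split_scan() moves a batch of queued folios to a local list
 * under the queue lock, drops the lock, then tries to lock and split each
 * folio; whatever could not be split is spliced back onto the queue.
 */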

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list);
	struct folio *folio, *next;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_entry_safe(folio, next, &ds_queue->split_queue,
							_deferred_list) {
		if (folio_try_get(folio)) {
			list_move(&folio->_deferred_list, &list);
		} else {
			/* We lost race with folio_put() */
			list_del_init(&folio->_deferred_list);
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_entry_safe(folio, next, &list, _deferred_list) {
		if (!folio_trylock(folio))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_folio(folio))
			split++;
		folio_unlock(folio);
next:
		folio_put(folio);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
		 SHRINKER_NONSLAB,
};

#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	struct folio *folio;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_zone(zone) {
		if (!managed_zone(zone))
			continue;
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			int nr_pages;

			page = pfn_to_online_page(pfn);
			if (!page || PageTail(page))
				continue;
			folio = page_folio(page);
			if (!folio_try_get(folio))
				continue;

			if (unlikely(page_folio(page) != folio))
				goto next;

			if (zone != folio_zone(folio))
				goto next;

			if (!folio_test_large(folio)
				|| folio_test_hugetlb(folio)
				|| !folio_test_lru(folio))
				goto next;

			total++;
			folio_lock(folio);
			nr_pages = folio_nr_pages(folio);
			if (!split_folio(folio))
				split++;
			pfn += nr_pages - 1;
			folio_unlock(folio);
next:
			folio_put(folio);
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}

static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
		    is_vm_hugetlb_page(vma);
}

static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = vma_lookup(mm, addr);
		struct page *page;

		if (!vma)
			break;

		/* skip special VMA and hugetlb VMA */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);

		if (IS_ERR_OR_NULL(page))
			continue;

		if (!is_transparent_hugepage(page))
			goto next;

		total++;
		if (!can_split_folio(page_folio(page), NULL))
			goto next;

		if (!trylock_page(page))
			goto next;

		if (!split_huge_page(page))
			split++;

		unlock_page(page);
next:
		put_page(page);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}

static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct folio *folio = __filemap_get_folio(mapping, index,
						FGP_ENTRY, 0);

		nr_pages = 1;
		if (xa_is_value(folio) || !folio)
			continue;

		if (!folio_test_large(folio))
			goto next;

		total++;
		nr_pages = folio_nr_pages(folio);

		if (!folio_trylock(folio))
			goto next;

		if (!split_folio(folio))
			split++;

		folio_unlock(folio);
next:
		folio_put(folio);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}

#define MAX_INPUT_BUF_SZ 255

static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppops)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}
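
/*
 * The accepted input formats follow the sscanf() calls above:
 *   "<pid>,<vaddr_start>,<vaddr_end>"   split THPs mapped in that virtual
 *                                       range of the given task (a bare
 *                                       "1" splits all THPs system-wide)
 *   "<path>,<off_start>,<off_end>"      split file-backed THPs in that
 *                                       page-offset range
 * Offsets and addresses are parsed as hex. For example (assuming debugfs
 * is mounted at /sys/kernel/debug):
 *   echo "1234,0x700000000000,0x700000200000" > /sys/kernel/debug/split_huge_pages
 */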

static const struct file_operations split_huge_pages_fops = {
	.owner	 = THIS_MODULE,
	.write	 = split_huge_pages_write,
	.llseek  = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
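
/*
 * The helpers below convert a present huge PMD into a PMD migration entry
 * (set_pmd_migration_entry()) and back again once migration has finished
 * (remove_migration_pmd()), preserving the dirty, young, soft-dirty,
 * uffd-wp and anon-exclusive state across the conversion.
 */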

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	bool anon_exclusive;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return 0;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);

	/* See page_try_share_anon_rmap(): invalidate PMD first. */
	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
	if (anon_exclusive && page_try_share_anon_rmap(page)) {
		set_pmd_at(mm, address, pvmw->pmd, pmdval);
		return -EBUSY;
	}

	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else if (anon_exclusive)
		entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	if (pmd_young(pmdval))
		entry = make_migration_entry_young(entry);
	if (pmd_dirty(pmdval))
		entry = make_migration_entry_dirty(entry);
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, vma, true);
	put_page(page);
	trace_set_migration_pmd(address, pmd_val(pmdswp));

	return 0;
}

void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_mkuffd_wp(pmde);
	if (!is_migration_entry_young(entry))
		pmde = pmd_mkold(pmde);
	/* NOTE: this may contain setting soft-dirty on some archs */
	if (PageDirty(new) && is_migration_entry_dirty(entry))
		pmde = pmd_mkdirty(pmde);

	if (PageAnon(new)) {
		rmap_t rmap_flags = RMAP_COMPOUND;

		if (!is_readable_migration_entry(entry))
			rmap_flags |= RMAP_EXCLUSIVE;

		page_add_anon_rmap(new, vma, haddr, rmap_flags);
	} else {
		page_add_file_rmap(new, vma, true);
	}
	VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new));
	set_pmd_at(mm, haddr, pvmw->pmd, pmde);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
	trace_remove_migration_pmd(address, pmd_val(pmde));
}
#endif