// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/pgalloc.h>
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
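/*
 * Note: only the choice between the global "always" and "madvise" defaults
 * comes from the CONFIG_TRANSPARENT_HUGEPAGE_{ALWAYS,MADVISE} selection above;
 * the defrag-on-madvise, khugepaged-defrag and huge-zero-page bits are part
 * of the default in either case.
 */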
static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	return transhuge_vma_enabled(vma, vma->vm_flags) && vma->vm_file &&
	       !inode_is_open_for_write(vma->vm_file->f_inode) &&
	       (vma->vm_flags & VM_EXEC);
}

bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);
	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
		return file_thp_enabled(vma);

	return false;
}
static bool get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	return true;
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}

void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
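/*
 * Note on the refcounting above: get_huge_zero_page() sets the count to 2 -
 * one reference for the caller and one that only the shrinker drops - so
 * shrink_huge_zero_page_count() reports the page as reclaimable only once
 * every mm reference has been put and the count has fallen back to 1.
 */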
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		output = "[always] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always [madvise] never";
	else
		output = "always madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();

		if (err)
			ret = err;
	}
	return ret;
}

static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
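/*
 * The attribute above is exposed as
 * /sys/kernel/mm/transparent_hugepage/enabled. For example,
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * takes the "madvise" branch of enabled_store() and then calls
 * start_stop_khugepaged() to start or stop the khugepaged thread.
 */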
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag)
{
	return sysfs_emit(buf, "%d\n",
			  !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	const char *output;

	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
		     &transparent_hugepage_flags))
		output = "[always] defer defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
			  &transparent_hugepage_flags))
		output = "always [defer] defer+madvise madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer [defer+madvise] madvise never";
	else if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
			  &transparent_hugepage_flags))
		output = "always defer defer+madvise [madvise] never";
	else
		output = "always defer defer+madvise madvise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
					 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}

static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
					  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}

static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", HPAGE_PMD_SIZE);
}

static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
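/*
 * hpage_pmd_size reports HPAGE_PMD_SIZE in bytes (2097152 on x86-64 with 4KB
 * base pages), letting userspace align MADV_HUGEPAGE regions without
 * hard-coding the PMD size.
 */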
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#ifdef CONFIG_SHMEM
	&shmem_enabled_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
348 static int __init
hugepage_init_sysfs(struct kobject
**hugepage_kobj
)
352 *hugepage_kobj
= kobject_create_and_add("transparent_hugepage", mm_kobj
);
353 if (unlikely(!*hugepage_kobj
)) {
354 pr_err("failed to create transparent hugepage kobject\n");
358 err
= sysfs_create_group(*hugepage_kobj
, &hugepage_attr_group
);
360 pr_err("failed to register transparent hugepage group\n");
364 err
= sysfs_create_group(*hugepage_kobj
, &khugepaged_attr_group
);
366 pr_err("failed to register transparent hugepage group\n");
367 goto remove_hp_group
;
373 sysfs_remove_group(*hugepage_kobj
, &hugepage_attr_group
);
375 kobject_put(*hugepage_kobj
);
379 static void __init
hugepage_exit_sysfs(struct kobject
*hugepage_kobj
)
381 sysfs_remove_group(hugepage_kobj
, &khugepaged_attr_group
);
382 sysfs_remove_group(hugepage_kobj
, &hugepage_attr_group
);
383 kobject_put(hugepage_kobj
);
386 static inline int hugepage_init_sysfs(struct kobject
**hugepage_kobj
)
391 static inline void hugepage_exit_sysfs(struct kobject
*hugepage_kobj
)
394 #endif /* CONFIG_SYSFS */
396 static int __init
hugepage_init(void)
399 struct kobject
*hugepage_kobj
;
401 if (!has_transparent_hugepage()) {
403 * Hardware doesn't support hugepages, hence disable
406 transparent_hugepage_flags
= 1 << TRANSPARENT_HUGEPAGE_NEVER_DAX
;
411 * hugepages can't be allocated by the buddy allocator
413 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER
>= MAX_ORDER
);
415 * we use page->mapping and page->index in second tail page
416 * as list_head: assuming THP order >= 2
418 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER
< 2);
420 err
= hugepage_init_sysfs(&hugepage_kobj
);
424 err
= khugepaged_init();
428 err
= register_shrinker(&huge_zero_page_shrinker
);
430 goto err_hzp_shrinker
;
431 err
= register_shrinker(&deferred_split_shrinker
);
433 goto err_split_shrinker
;
436 * By default disable transparent hugepages on smaller systems,
437 * where the extra memory used could hurt more than TLB overhead
438 * is likely to save. The admin can still enable it through /sys.
440 if (totalram_pages() < (512 << (20 - PAGE_SHIFT
))) {
441 transparent_hugepage_flags
= 0;
445 err
= start_stop_khugepaged();
451 unregister_shrinker(&deferred_split_shrinker
);
453 unregister_shrinker(&huge_zero_page_shrinker
);
455 khugepaged_destroy();
457 hugepage_exit_sysfs(hugepage_kobj
);
461 subsys_initcall(hugepage_init
);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
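/*
 * For example, booting with "transparent_hugepage=madvise" on the kernel
 * command line selects the initial policy through setup_transparent_hugepage()
 * above, before the sysfs interface is available.
 */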
494 pmd_t
maybe_pmd_mkwrite(pmd_t pmd
, struct vm_area_struct
*vma
)
496 if (likely(vma
->vm_flags
& VM_WRITE
))
497 pmd
= pmd_mkwrite(pmd
);
502 static inline struct deferred_split
*get_deferred_split_queue(struct page
*page
)
504 struct mem_cgroup
*memcg
= page_memcg(compound_head(page
));
505 struct pglist_data
*pgdat
= NODE_DATA(page_to_nid(page
));
508 return &memcg
->deferred_split_queue
;
510 return &pgdat
->deferred_split_queue
;
513 static inline struct deferred_split
*get_deferred_split_queue(struct page
*page
)
515 struct pglist_data
*pgdat
= NODE_DATA(page_to_nid(page
));
517 return &pgdat
->deferred_split_queue
;
521 void prep_transhuge_page(struct page
*page
)
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
528 INIT_LIST_HEAD(page_deferred_list(page
));
529 set_compound_page_dtor(page
, TRANSHUGE_PAGE_DTOR
);
532 bool is_transparent_hugepage(struct page
*page
)
534 if (!PageCompound(page
))
537 page
= compound_head(page
);
538 return is_huge_zero_page(page
) ||
539 page
[1].compound_dtor
== TRANSHUGE_PAGE_DTOR
;
541 EXPORT_SYMBOL_GPL(is_transparent_hugepage
);
543 static unsigned long __thp_get_unmapped_area(struct file
*filp
,
544 unsigned long addr
, unsigned long len
,
545 loff_t off
, unsigned long flags
, unsigned long size
)
547 loff_t off_end
= off
+ len
;
548 loff_t off_align
= round_up(off
, size
);
549 unsigned long len_pad
, ret
;
551 if (off_end
<= off_align
|| (off_end
- off_align
) < size
)
554 len_pad
= len
+ size
;
555 if (len_pad
< len
|| (off
+ len_pad
) < off
)
558 ret
= current
->mm
->get_unmapped_area(filp
, addr
, len_pad
,
559 off
>> PAGE_SHIFT
, flags
);
562 * The failure might be due to length padding. The caller will retry
563 * without the padding.
565 if (IS_ERR_VALUE(ret
))
569 * Do not try to align to THP boundary if allocation at the address
575 ret
+= (off
- ret
) & (size
- 1);
579 unsigned long thp_get_unmapped_area(struct file
*filp
, unsigned long addr
,
580 unsigned long len
, unsigned long pgoff
, unsigned long flags
)
583 loff_t off
= (loff_t
)pgoff
<< PAGE_SHIFT
;
585 if (!IS_DAX(filp
->f_mapping
->host
) || !IS_ENABLED(CONFIG_FS_DAX_PMD
))
588 ret
= __thp_get_unmapped_area(filp
, addr
, len
, off
, flags
, PMD_SIZE
);
592 return current
->mm
->get_unmapped_area(filp
, addr
, len
, pgoff
, flags
);
594 EXPORT_SYMBOL_GPL(thp_get_unmapped_area
);
596 static vm_fault_t
__do_huge_pmd_anonymous_page(struct vm_fault
*vmf
,
597 struct page
*page
, gfp_t gfp
)
599 struct vm_area_struct
*vma
= vmf
->vma
;
601 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
604 VM_BUG_ON_PAGE(!PageCompound(page
), page
);
606 if (mem_cgroup_charge(page
, vma
->vm_mm
, gfp
)) {
608 count_vm_event(THP_FAULT_FALLBACK
);
609 count_vm_event(THP_FAULT_FALLBACK_CHARGE
);
610 return VM_FAULT_FALLBACK
;
612 cgroup_throttle_swaprate(page
, gfp
);
614 pgtable
= pte_alloc_one(vma
->vm_mm
);
615 if (unlikely(!pgtable
)) {
620 clear_huge_page(page
, vmf
->address
, HPAGE_PMD_NR
);
622 * The memory barrier inside __SetPageUptodate makes sure that
623 * clear_huge_page writes become visible before the set_pmd_at()
626 __SetPageUptodate(page
);
628 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
629 if (unlikely(!pmd_none(*vmf
->pmd
))) {
634 ret
= check_stable_address_space(vma
->vm_mm
);
638 /* Deliver the page fault to userland */
639 if (userfaultfd_missing(vma
)) {
640 spin_unlock(vmf
->ptl
);
642 pte_free(vma
->vm_mm
, pgtable
);
643 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
644 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
648 entry
= mk_huge_pmd(page
, vma
->vm_page_prot
);
649 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
650 page_add_new_anon_rmap(page
, vma
, haddr
, true);
651 lru_cache_add_inactive_or_unevictable(page
, vma
);
652 pgtable_trans_huge_deposit(vma
->vm_mm
, vmf
->pmd
, pgtable
);
653 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
654 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
655 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
656 mm_inc_nr_ptes(vma
->vm_mm
);
657 spin_unlock(vmf
->ptl
);
658 count_vm_event(THP_FAULT_ALLOC
);
659 count_memcg_event_mm(vma
->vm_mm
, THP_FAULT_ALLOC
);
664 spin_unlock(vmf
->ptl
);
667 pte_free(vma
->vm_mm
, pgtable
);
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
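/*
 * For example, with the "defer+madvise" policy selected via
 * /sys/kernel/mm/transparent_hugepage/defrag, vma_thp_gfp_mask() below
 * returns GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM for a VM_HUGEPAGE
 * (MADV_HUGEPAGE) vma and GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM
 * otherwise.
 */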
682 gfp_t
vma_thp_gfp_mask(struct vm_area_struct
*vma
)
684 const bool vma_madvised
= vma
&& (vma
->vm_flags
& VM_HUGEPAGE
);
686 /* Always do synchronous compaction */
687 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
))
688 return GFP_TRANSHUGE
| (vma_madvised
? 0 : __GFP_NORETRY
);
690 /* Kick kcompactd and fail quickly */
691 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
))
692 return GFP_TRANSHUGE_LIGHT
| __GFP_KSWAPD_RECLAIM
;
694 /* Synchronous compaction if madvised, otherwise kick kcompactd */
695 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
))
696 return GFP_TRANSHUGE_LIGHT
|
697 (vma_madvised
? __GFP_DIRECT_RECLAIM
:
698 __GFP_KSWAPD_RECLAIM
);
700 /* Only do synchronous compaction if madvised */
701 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
))
702 return GFP_TRANSHUGE_LIGHT
|
703 (vma_madvised
? __GFP_DIRECT_RECLAIM
: 0);
705 return GFP_TRANSHUGE_LIGHT
;
708 /* Caller must hold page table lock. */
709 static void set_huge_zero_page(pgtable_t pgtable
, struct mm_struct
*mm
,
710 struct vm_area_struct
*vma
, unsigned long haddr
, pmd_t
*pmd
,
711 struct page
*zero_page
)
716 entry
= mk_pmd(zero_page
, vma
->vm_page_prot
);
717 entry
= pmd_mkhuge(entry
);
719 pgtable_trans_huge_deposit(mm
, pmd
, pgtable
);
720 set_pmd_at(mm
, haddr
, pmd
, entry
);
724 vm_fault_t
do_huge_pmd_anonymous_page(struct vm_fault
*vmf
)
726 struct vm_area_struct
*vma
= vmf
->vma
;
729 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
731 if (!transhuge_vma_suitable(vma
, haddr
))
732 return VM_FAULT_FALLBACK
;
733 if (unlikely(anon_vma_prepare(vma
)))
735 if (unlikely(khugepaged_enter(vma
, vma
->vm_flags
)))
737 if (!(vmf
->flags
& FAULT_FLAG_WRITE
) &&
738 !mm_forbids_zeropage(vma
->vm_mm
) &&
739 transparent_hugepage_use_zero_page()) {
741 struct page
*zero_page
;
743 pgtable
= pte_alloc_one(vma
->vm_mm
);
744 if (unlikely(!pgtable
))
746 zero_page
= mm_get_huge_zero_page(vma
->vm_mm
);
747 if (unlikely(!zero_page
)) {
748 pte_free(vma
->vm_mm
, pgtable
);
749 count_vm_event(THP_FAULT_FALLBACK
);
750 return VM_FAULT_FALLBACK
;
752 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
754 if (pmd_none(*vmf
->pmd
)) {
755 ret
= check_stable_address_space(vma
->vm_mm
);
757 spin_unlock(vmf
->ptl
);
758 pte_free(vma
->vm_mm
, pgtable
);
759 } else if (userfaultfd_missing(vma
)) {
760 spin_unlock(vmf
->ptl
);
761 pte_free(vma
->vm_mm
, pgtable
);
762 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
763 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
765 set_huge_zero_page(pgtable
, vma
->vm_mm
, vma
,
766 haddr
, vmf
->pmd
, zero_page
);
767 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
768 spin_unlock(vmf
->ptl
);
771 spin_unlock(vmf
->ptl
);
772 pte_free(vma
->vm_mm
, pgtable
);
776 gfp
= vma_thp_gfp_mask(vma
);
777 page
= alloc_hugepage_vma(gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
778 if (unlikely(!page
)) {
779 count_vm_event(THP_FAULT_FALLBACK
);
780 return VM_FAULT_FALLBACK
;
782 prep_transhuge_page(page
);
783 return __do_huge_pmd_anonymous_page(vmf
, page
, gfp
);
786 static void insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
787 pmd_t
*pmd
, pfn_t pfn
, pgprot_t prot
, bool write
,
790 struct mm_struct
*mm
= vma
->vm_mm
;
794 ptl
= pmd_lock(mm
, pmd
);
795 if (!pmd_none(*pmd
)) {
797 if (pmd_pfn(*pmd
) != pfn_t_to_pfn(pfn
)) {
798 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd
));
801 entry
= pmd_mkyoung(*pmd
);
802 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
803 if (pmdp_set_access_flags(vma
, addr
, pmd
, entry
, 1))
804 update_mmu_cache_pmd(vma
, addr
, pmd
);
810 entry
= pmd_mkhuge(pfn_t_pmd(pfn
, prot
));
811 if (pfn_t_devmap(pfn
))
812 entry
= pmd_mkdevmap(entry
);
814 entry
= pmd_mkyoung(pmd_mkdirty(entry
));
815 entry
= maybe_pmd_mkwrite(entry
, vma
);
819 pgtable_trans_huge_deposit(mm
, pmd
, pgtable
);
824 set_pmd_at(mm
, addr
, pmd
, entry
);
825 update_mmu_cache_pmd(vma
, addr
, pmd
);
830 pte_free(mm
, pgtable
);
834 * vmf_insert_pfn_pmd_prot - insert a pmd size pfn
835 * @vmf: Structure describing the fault
836 * @pfn: pfn to insert
837 * @pgprot: page protection to use
838 * @write: whether it's a write fault
840 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info and
841 * also consult the vmf_insert_mixed_prot() documentation when
842 * @pgprot != @vmf->vma->vm_page_prot.
844 * Return: vm_fault_t value.
846 vm_fault_t
vmf_insert_pfn_pmd_prot(struct vm_fault
*vmf
, pfn_t pfn
,
847 pgprot_t pgprot
, bool write
)
849 unsigned long addr
= vmf
->address
& PMD_MASK
;
850 struct vm_area_struct
*vma
= vmf
->vma
;
851 pgtable_t pgtable
= NULL
;
854 * If we had pmd_special, we could avoid all these restrictions,
855 * but we need to be consistent with PTEs and architectures that
856 * can't support a 'special' bit.
858 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) &&
860 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
861 (VM_PFNMAP
|VM_MIXEDMAP
));
862 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
864 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
865 return VM_FAULT_SIGBUS
;
867 if (arch_needs_pgtable_deposit()) {
868 pgtable
= pte_alloc_one(vma
->vm_mm
);
873 track_pfn_insert(vma
, &pgprot
, pfn
);
875 insert_pfn_pmd(vma
, addr
, vmf
->pmd
, pfn
, pgprot
, write
, pgtable
);
876 return VM_FAULT_NOPAGE
;
878 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd_prot
);
880 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
881 static pud_t
maybe_pud_mkwrite(pud_t pud
, struct vm_area_struct
*vma
)
883 if (likely(vma
->vm_flags
& VM_WRITE
))
884 pud
= pud_mkwrite(pud
);
888 static void insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
889 pud_t
*pud
, pfn_t pfn
, pgprot_t prot
, bool write
)
891 struct mm_struct
*mm
= vma
->vm_mm
;
895 ptl
= pud_lock(mm
, pud
);
896 if (!pud_none(*pud
)) {
898 if (pud_pfn(*pud
) != pfn_t_to_pfn(pfn
)) {
899 WARN_ON_ONCE(!is_huge_zero_pud(*pud
));
902 entry
= pud_mkyoung(*pud
);
903 entry
= maybe_pud_mkwrite(pud_mkdirty(entry
), vma
);
904 if (pudp_set_access_flags(vma
, addr
, pud
, entry
, 1))
905 update_mmu_cache_pud(vma
, addr
, pud
);
910 entry
= pud_mkhuge(pfn_t_pud(pfn
, prot
));
911 if (pfn_t_devmap(pfn
))
912 entry
= pud_mkdevmap(entry
);
914 entry
= pud_mkyoung(pud_mkdirty(entry
));
915 entry
= maybe_pud_mkwrite(entry
, vma
);
917 set_pud_at(mm
, addr
, pud
, entry
);
918 update_mmu_cache_pud(vma
, addr
, pud
);
925 * vmf_insert_pfn_pud_prot - insert a pud size pfn
926 * @vmf: Structure describing the fault
927 * @pfn: pfn to insert
928 * @pgprot: page protection to use
929 * @write: whether it's a write fault
931 * Insert a pud size pfn. See vmf_insert_pfn() for additional info and
932 * also consult the vmf_insert_mixed_prot() documentation when
933 * @pgprot != @vmf->vma->vm_page_prot.
935 * Return: vm_fault_t value.
937 vm_fault_t
vmf_insert_pfn_pud_prot(struct vm_fault
*vmf
, pfn_t pfn
,
938 pgprot_t pgprot
, bool write
)
940 unsigned long addr
= vmf
->address
& PUD_MASK
;
941 struct vm_area_struct
*vma
= vmf
->vma
;
944 * If we had pud_special, we could avoid all these restrictions,
945 * but we need to be consistent with PTEs and architectures that
946 * can't support a 'special' bit.
948 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) &&
950 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
951 (VM_PFNMAP
|VM_MIXEDMAP
));
952 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
954 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
955 return VM_FAULT_SIGBUS
;
957 track_pfn_insert(vma
, &pgprot
, pfn
);
959 insert_pfn_pud(vma
, addr
, vmf
->pud
, pfn
, pgprot
, write
);
960 return VM_FAULT_NOPAGE
;
962 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud_prot
);
963 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
965 static void touch_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
966 pmd_t
*pmd
, int flags
)
970 _pmd
= pmd_mkyoung(*pmd
);
971 if (flags
& FOLL_WRITE
)
972 _pmd
= pmd_mkdirty(_pmd
);
973 if (pmdp_set_access_flags(vma
, addr
& HPAGE_PMD_MASK
,
974 pmd
, _pmd
, flags
& FOLL_WRITE
))
975 update_mmu_cache_pmd(vma
, addr
, pmd
);
978 struct page
*follow_devmap_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
979 pmd_t
*pmd
, int flags
, struct dev_pagemap
**pgmap
)
981 unsigned long pfn
= pmd_pfn(*pmd
);
982 struct mm_struct
*mm
= vma
->vm_mm
;
985 assert_spin_locked(pmd_lockptr(mm
, pmd
));
988 * When we COW a devmap PMD entry, we split it into PTEs, so we should
989 * not be in this function with `flags & FOLL_COW` set.
991 WARN_ONCE(flags
& FOLL_COW
, "mm: In follow_devmap_pmd with FOLL_COW set");
993 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
994 if (WARN_ON_ONCE((flags
& (FOLL_PIN
| FOLL_GET
)) ==
995 (FOLL_PIN
| FOLL_GET
)))
998 if (flags
& FOLL_WRITE
&& !pmd_write(*pmd
))
1001 if (pmd_present(*pmd
) && pmd_devmap(*pmd
))
1006 if (flags
& FOLL_TOUCH
)
1007 touch_pmd(vma
, addr
, pmd
, flags
);
1010 * device mapped pages can only be returned if the
1011 * caller will manage the page reference count.
1013 if (!(flags
& (FOLL_GET
| FOLL_PIN
)))
1014 return ERR_PTR(-EEXIST
);
1016 pfn
+= (addr
& ~PMD_MASK
) >> PAGE_SHIFT
;
1017 *pgmap
= get_dev_pagemap(pfn
, *pgmap
);
1019 return ERR_PTR(-EFAULT
);
1020 page
= pfn_to_page(pfn
);
1021 if (!try_grab_page(page
, flags
))
1022 page
= ERR_PTR(-ENOMEM
);
1027 int copy_huge_pmd(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1028 pmd_t
*dst_pmd
, pmd_t
*src_pmd
, unsigned long addr
,
1029 struct vm_area_struct
*dst_vma
, struct vm_area_struct
*src_vma
)
1031 spinlock_t
*dst_ptl
, *src_ptl
;
1032 struct page
*src_page
;
1034 pgtable_t pgtable
= NULL
;
	/* Skip if it can be re-filled on fault */
1038 if (!vma_is_anonymous(dst_vma
))
1041 pgtable
= pte_alloc_one(dst_mm
);
1042 if (unlikely(!pgtable
))
1045 dst_ptl
= pmd_lock(dst_mm
, dst_pmd
);
1046 src_ptl
= pmd_lockptr(src_mm
, src_pmd
);
1047 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1052 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1053 if (unlikely(is_swap_pmd(pmd
))) {
1054 swp_entry_t entry
= pmd_to_swp_entry(pmd
);
1056 VM_BUG_ON(!is_pmd_migration_entry(pmd
));
1057 if (is_writable_migration_entry(entry
)) {
1058 entry
= make_readable_migration_entry(
1060 pmd
= swp_entry_to_pmd(entry
);
1061 if (pmd_swp_soft_dirty(*src_pmd
))
1062 pmd
= pmd_swp_mksoft_dirty(pmd
);
1063 if (pmd_swp_uffd_wp(*src_pmd
))
1064 pmd
= pmd_swp_mkuffd_wp(pmd
);
1065 set_pmd_at(src_mm
, addr
, src_pmd
, pmd
);
1067 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1068 mm_inc_nr_ptes(dst_mm
);
1069 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
1070 if (!userfaultfd_wp(dst_vma
))
1071 pmd
= pmd_swp_clear_uffd_wp(pmd
);
1072 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
1078 if (unlikely(!pmd_trans_huge(pmd
))) {
1079 pte_free(dst_mm
, pgtable
);
	 * When page table lock is held, the huge zero pmd should not be
	 * under splitting since we don't split the page itself, only pmd to
	 * a page table.
1087 if (is_huge_zero_pmd(pmd
)) {
1089 * get_huge_zero_page() will never allocate a new page here,
1090 * since we already have a zero page to copy. It just takes a
1093 mm_get_huge_zero_page(dst_mm
);
1097 src_page
= pmd_page(pmd
);
1098 VM_BUG_ON_PAGE(!PageHead(src_page
), src_page
);
1101 * If this page is a potentially pinned page, split and retry the fault
1102 * with smaller page size. Normally this should not happen because the
	 * userspace should use MADV_DONTFORK upon pinned regions. This is a
	 * best effort to ensure that the pinned pages won't be replaced by
	 * another random page during the coming copy-on-write.
1107 if (unlikely(page_needs_cow_for_dma(src_vma
, src_page
))) {
1108 pte_free(dst_mm
, pgtable
);
1109 spin_unlock(src_ptl
);
1110 spin_unlock(dst_ptl
);
1111 __split_huge_pmd(src_vma
, src_pmd
, addr
, false, NULL
);
1116 page_dup_rmap(src_page
, true);
1117 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1119 mm_inc_nr_ptes(dst_mm
);
1120 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
1121 pmdp_set_wrprotect(src_mm
, addr
, src_pmd
);
1122 if (!userfaultfd_wp(dst_vma
))
1123 pmd
= pmd_clear_uffd_wp(pmd
);
1124 pmd
= pmd_mkold(pmd_wrprotect(pmd
));
1125 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
1129 spin_unlock(src_ptl
);
1130 spin_unlock(dst_ptl
);
1135 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1136 static void touch_pud(struct vm_area_struct
*vma
, unsigned long addr
,
1137 pud_t
*pud
, int flags
)
1141 _pud
= pud_mkyoung(*pud
);
1142 if (flags
& FOLL_WRITE
)
1143 _pud
= pud_mkdirty(_pud
);
1144 if (pudp_set_access_flags(vma
, addr
& HPAGE_PUD_MASK
,
1145 pud
, _pud
, flags
& FOLL_WRITE
))
1146 update_mmu_cache_pud(vma
, addr
, pud
);
1149 struct page
*follow_devmap_pud(struct vm_area_struct
*vma
, unsigned long addr
,
1150 pud_t
*pud
, int flags
, struct dev_pagemap
**pgmap
)
1152 unsigned long pfn
= pud_pfn(*pud
);
1153 struct mm_struct
*mm
= vma
->vm_mm
;
1156 assert_spin_locked(pud_lockptr(mm
, pud
));
1158 if (flags
& FOLL_WRITE
&& !pud_write(*pud
))
1161 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
1162 if (WARN_ON_ONCE((flags
& (FOLL_PIN
| FOLL_GET
)) ==
1163 (FOLL_PIN
| FOLL_GET
)))
1166 if (pud_present(*pud
) && pud_devmap(*pud
))
1171 if (flags
& FOLL_TOUCH
)
1172 touch_pud(vma
, addr
, pud
, flags
);
1175 * device mapped pages can only be returned if the
1176 * caller will manage the page reference count.
1178 * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here:
1180 if (!(flags
& (FOLL_GET
| FOLL_PIN
)))
1181 return ERR_PTR(-EEXIST
);
1183 pfn
+= (addr
& ~PUD_MASK
) >> PAGE_SHIFT
;
1184 *pgmap
= get_dev_pagemap(pfn
, *pgmap
);
1186 return ERR_PTR(-EFAULT
);
1187 page
= pfn_to_page(pfn
);
1188 if (!try_grab_page(page
, flags
))
1189 page
= ERR_PTR(-ENOMEM
);
1194 int copy_huge_pud(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1195 pud_t
*dst_pud
, pud_t
*src_pud
, unsigned long addr
,
1196 struct vm_area_struct
*vma
)
1198 spinlock_t
*dst_ptl
, *src_ptl
;
1202 dst_ptl
= pud_lock(dst_mm
, dst_pud
);
1203 src_ptl
= pud_lockptr(src_mm
, src_pud
);
1204 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1208 if (unlikely(!pud_trans_huge(pud
) && !pud_devmap(pud
)))
	 * When page table lock is held, the huge zero pud should not be
	 * under splitting since we don't split the page itself, only pud to
	 * a page table.
1216 if (is_huge_zero_pud(pud
)) {
1217 /* No huge zero pud yet */
1220 /* Please refer to comments in copy_huge_pmd() */
1221 if (unlikely(page_needs_cow_for_dma(vma
, pud_page(pud
)))) {
1222 spin_unlock(src_ptl
);
1223 spin_unlock(dst_ptl
);
1224 __split_huge_pud(vma
, src_pud
, addr
);
1228 pudp_set_wrprotect(src_mm
, addr
, src_pud
);
1229 pud
= pud_mkold(pud_wrprotect(pud
));
1230 set_pud_at(dst_mm
, addr
, dst_pud
, pud
);
1234 spin_unlock(src_ptl
);
1235 spin_unlock(dst_ptl
);
1239 void huge_pud_set_accessed(struct vm_fault
*vmf
, pud_t orig_pud
)
1242 unsigned long haddr
;
1243 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1245 vmf
->ptl
= pud_lock(vmf
->vma
->vm_mm
, vmf
->pud
);
1246 if (unlikely(!pud_same(*vmf
->pud
, orig_pud
)))
1249 entry
= pud_mkyoung(orig_pud
);
1251 entry
= pud_mkdirty(entry
);
1252 haddr
= vmf
->address
& HPAGE_PUD_MASK
;
1253 if (pudp_set_access_flags(vmf
->vma
, haddr
, vmf
->pud
, entry
, write
))
1254 update_mmu_cache_pud(vmf
->vma
, vmf
->address
, vmf
->pud
);
1257 spin_unlock(vmf
->ptl
);
1259 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1261 void huge_pmd_set_accessed(struct vm_fault
*vmf
)
1264 unsigned long haddr
;
1265 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1266 pmd_t orig_pmd
= vmf
->orig_pmd
;
1268 vmf
->ptl
= pmd_lock(vmf
->vma
->vm_mm
, vmf
->pmd
);
1269 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1272 entry
= pmd_mkyoung(orig_pmd
);
1274 entry
= pmd_mkdirty(entry
);
1275 haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1276 if (pmdp_set_access_flags(vmf
->vma
, haddr
, vmf
->pmd
, entry
, write
))
1277 update_mmu_cache_pmd(vmf
->vma
, vmf
->address
, vmf
->pmd
);
1280 spin_unlock(vmf
->ptl
);
1283 vm_fault_t
do_huge_pmd_wp_page(struct vm_fault
*vmf
)
1285 struct vm_area_struct
*vma
= vmf
->vma
;
1287 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1288 pmd_t orig_pmd
= vmf
->orig_pmd
;
1290 vmf
->ptl
= pmd_lockptr(vma
->vm_mm
, vmf
->pmd
);
1291 VM_BUG_ON_VMA(!vma
->anon_vma
, vma
);
1293 if (is_huge_zero_pmd(orig_pmd
))
1296 spin_lock(vmf
->ptl
);
1298 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1299 spin_unlock(vmf
->ptl
);
1303 page
= pmd_page(orig_pmd
);
1304 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1306 /* Lock page for reuse_swap_page() */
1307 if (!trylock_page(page
)) {
1309 spin_unlock(vmf
->ptl
);
1311 spin_lock(vmf
->ptl
);
1312 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1313 spin_unlock(vmf
->ptl
);
1322 * We can only reuse the page if nobody else maps the huge page or it's
1325 if (reuse_swap_page(page
, NULL
)) {
1327 entry
= pmd_mkyoung(orig_pmd
);
1328 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1329 if (pmdp_set_access_flags(vma
, haddr
, vmf
->pmd
, entry
, 1))
1330 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1332 spin_unlock(vmf
->ptl
);
1333 return VM_FAULT_WRITE
;
1337 spin_unlock(vmf
->ptl
);
1339 __split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
, false, NULL
);
1340 return VM_FAULT_FALLBACK
;
1344 * FOLL_FORCE can write to even unwritable pmd's, but only
1345 * after we've gone through a COW cycle and they are dirty.
1347 static inline bool can_follow_write_pmd(pmd_t pmd
, unsigned int flags
)
1349 return pmd_write(pmd
) ||
1350 ((flags
& FOLL_FORCE
) && (flags
& FOLL_COW
) && pmd_dirty(pmd
));
1353 struct page
*follow_trans_huge_pmd(struct vm_area_struct
*vma
,
1358 struct mm_struct
*mm
= vma
->vm_mm
;
1359 struct page
*page
= NULL
;
1361 assert_spin_locked(pmd_lockptr(mm
, pmd
));
1363 if (flags
& FOLL_WRITE
&& !can_follow_write_pmd(*pmd
, flags
))
1366 /* Avoid dumping huge zero page */
1367 if ((flags
& FOLL_DUMP
) && is_huge_zero_pmd(*pmd
))
1368 return ERR_PTR(-EFAULT
);
1370 /* Full NUMA hinting faults to serialise migration in fault paths */
1371 if ((flags
& FOLL_NUMA
) && pmd_protnone(*pmd
))
1374 page
= pmd_page(*pmd
);
1375 VM_BUG_ON_PAGE(!PageHead(page
) && !is_zone_device_page(page
), page
);
1377 if (!try_grab_page(page
, flags
))
1378 return ERR_PTR(-ENOMEM
);
1380 if (flags
& FOLL_TOUCH
)
1381 touch_pmd(vma
, addr
, pmd
, flags
);
1383 if ((flags
& FOLL_MLOCK
) && (vma
->vm_flags
& VM_LOCKED
)) {
1385 * We don't mlock() pte-mapped THPs. This way we can avoid
1386 * leaking mlocked pages into non-VM_LOCKED VMAs.
1390 * In most cases the pmd is the only mapping of the page as we
1391 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1392 * writable private mappings in populate_vma_page_range().
		 * The only scenario when we have the page shared here is if we
		 * are mlocking a read-only mapping shared over fork(). We skip
1396 * mlocking such pages.
1400 * We can expect PageDoubleMap() to be stable under page lock:
1401 * for file pages we set it in page_add_file_rmap(), which
1402 * requires page to be locked.
1405 if (PageAnon(page
) && compound_mapcount(page
) != 1)
1407 if (PageDoubleMap(page
) || !page
->mapping
)
1409 if (!trylock_page(page
))
1411 if (page
->mapping
&& !PageDoubleMap(page
))
1412 mlock_vma_page(page
);
1416 page
+= (addr
& ~HPAGE_PMD_MASK
) >> PAGE_SHIFT
;
1417 VM_BUG_ON_PAGE(!PageCompound(page
) && !is_zone_device_page(page
), page
);
1423 /* NUMA hinting page fault entry point for trans huge pmds */
1424 vm_fault_t
do_huge_pmd_numa_page(struct vm_fault
*vmf
)
1426 struct vm_area_struct
*vma
= vmf
->vma
;
1427 pmd_t oldpmd
= vmf
->orig_pmd
;
1430 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1431 int page_nid
= NUMA_NO_NODE
;
1432 int target_nid
, last_cpupid
= -1;
1433 bool migrated
= false;
1434 bool was_writable
= pmd_savedwrite(oldpmd
);
1437 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1438 if (unlikely(!pmd_same(oldpmd
, *vmf
->pmd
))) {
1439 spin_unlock(vmf
->ptl
);
1444 * Since we took the NUMA fault, we must have observed the !accessible
1445 * bit. Make sure all other CPUs agree with that, to avoid them
1446 * modifying the page we're about to migrate.
1448 * Must be done under PTL such that we'll observe the relevant
1449 * inc_tlb_flush_pending().
1451 * We are not sure a pending tlb flush here is for a huge page
1452 * mapping or not. Hence use the tlb range variant
1454 if (mm_tlb_flush_pending(vma
->vm_mm
)) {
1455 flush_tlb_range(vma
, haddr
, haddr
+ HPAGE_PMD_SIZE
);
1457 * change_huge_pmd() released the pmd lock before
1458 * invalidating the secondary MMUs sharing the primary
1459 * MMU pagetables (with ->invalidate_range()). The
1460 * mmu_notifier_invalidate_range_end() (which
1461 * internally calls ->invalidate_range()) in
1462 * change_pmd_range() will run after us, so we can't
1463 * rely on it here and we need an explicit invalidate.
1465 mmu_notifier_invalidate_range(vma
->vm_mm
, haddr
,
1466 haddr
+ HPAGE_PMD_SIZE
);
1469 pmd
= pmd_modify(oldpmd
, vma
->vm_page_prot
);
1470 page
= vm_normal_page_pmd(vma
, haddr
, pmd
);
1474 /* See similar comment in do_numa_page for explanation */
1476 flags
|= TNF_NO_GROUP
;
1478 page_nid
= page_to_nid(page
);
1479 last_cpupid
= page_cpupid_last(page
);
1480 target_nid
= numa_migrate_prep(page
, vma
, haddr
, page_nid
,
1483 if (target_nid
== NUMA_NO_NODE
) {
1488 spin_unlock(vmf
->ptl
);
1490 migrated
= migrate_misplaced_page(page
, vma
, target_nid
);
1492 flags
|= TNF_MIGRATED
;
1493 page_nid
= target_nid
;
1495 flags
|= TNF_MIGRATE_FAIL
;
1496 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1497 if (unlikely(!pmd_same(oldpmd
, *vmf
->pmd
))) {
1498 spin_unlock(vmf
->ptl
);
1505 if (page_nid
!= NUMA_NO_NODE
)
1506 task_numa_fault(last_cpupid
, page_nid
, HPAGE_PMD_NR
,
1512 /* Restore the PMD */
1513 pmd
= pmd_modify(oldpmd
, vma
->vm_page_prot
);
1514 pmd
= pmd_mkyoung(pmd
);
1516 pmd
= pmd_mkwrite(pmd
);
1517 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, pmd
);
1518 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1519 spin_unlock(vmf
->ptl
);
1524 * Return true if we do MADV_FREE successfully on entire pmd page.
1525 * Otherwise, return false.
1527 bool madvise_free_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1528 pmd_t
*pmd
, unsigned long addr
, unsigned long next
)
1533 struct mm_struct
*mm
= tlb
->mm
;
1536 tlb_change_page_size(tlb
, HPAGE_PMD_SIZE
);
1538 ptl
= pmd_trans_huge_lock(pmd
, vma
);
1543 if (is_huge_zero_pmd(orig_pmd
))
1546 if (unlikely(!pmd_present(orig_pmd
))) {
1547 VM_BUG_ON(thp_migration_supported() &&
1548 !is_pmd_migration_entry(orig_pmd
));
1552 page
= pmd_page(orig_pmd
);
	 * If other processes are mapping this page, we cannot discard
	 * the page unless they all do MADV_FREE, so let's skip the page.
1557 if (total_mapcount(page
) != 1)
1560 if (!trylock_page(page
))
	 * If the user wants to discard part of the THP's pages, split it so
	 * MADV_FREE will deactivate only those pages.
1567 if (next
- addr
!= HPAGE_PMD_SIZE
) {
1570 split_huge_page(page
);
1576 if (PageDirty(page
))
1577 ClearPageDirty(page
);
1580 if (pmd_young(orig_pmd
) || pmd_dirty(orig_pmd
)) {
1581 pmdp_invalidate(vma
, addr
, pmd
);
1582 orig_pmd
= pmd_mkold(orig_pmd
);
1583 orig_pmd
= pmd_mkclean(orig_pmd
);
1585 set_pmd_at(mm
, addr
, pmd
, orig_pmd
);
1586 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1589 mark_page_lazyfree(page
);
1597 static inline void zap_deposited_table(struct mm_struct
*mm
, pmd_t
*pmd
)
1601 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1602 pte_free(mm
, pgtable
);
1606 int zap_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1607 pmd_t
*pmd
, unsigned long addr
)
1612 tlb_change_page_size(tlb
, HPAGE_PMD_SIZE
);
1614 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1618 * For architectures like ppc64 we look at deposited pgtable
1619 * when calling pmdp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pmdp related operations.
1623 orig_pmd
= pmdp_huge_get_and_clear_full(vma
, addr
, pmd
,
1625 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1626 if (vma_is_special_huge(vma
)) {
1627 if (arch_needs_pgtable_deposit())
1628 zap_deposited_table(tlb
->mm
, pmd
);
1630 } else if (is_huge_zero_pmd(orig_pmd
)) {
1631 zap_deposited_table(tlb
->mm
, pmd
);
1634 struct page
*page
= NULL
;
1635 int flush_needed
= 1;
1637 if (pmd_present(orig_pmd
)) {
1638 page
= pmd_page(orig_pmd
);
1639 page_remove_rmap(page
, true);
1640 VM_BUG_ON_PAGE(page_mapcount(page
) < 0, page
);
1641 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1642 } else if (thp_migration_supported()) {
1645 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd
));
1646 entry
= pmd_to_swp_entry(orig_pmd
);
1647 page
= pfn_swap_entry_to_page(entry
);
1650 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1652 if (PageAnon(page
)) {
1653 zap_deposited_table(tlb
->mm
, pmd
);
1654 add_mm_counter(tlb
->mm
, MM_ANONPAGES
, -HPAGE_PMD_NR
);
1656 if (arch_needs_pgtable_deposit())
1657 zap_deposited_table(tlb
->mm
, pmd
);
1658 add_mm_counter(tlb
->mm
, mm_counter_file(page
), -HPAGE_PMD_NR
);
1663 tlb_remove_page_size(tlb
, page
, HPAGE_PMD_SIZE
);
1668 #ifndef pmd_move_must_withdraw
1669 static inline int pmd_move_must_withdraw(spinlock_t
*new_pmd_ptl
,
1670 spinlock_t
*old_pmd_ptl
,
1671 struct vm_area_struct
*vma
)
1674 * With split pmd lock we also need to move preallocated
1675 * PTE page table if new_pmd is on different PMD page table.
1677 * We also don't deposit and withdraw tables for file pages.
1679 return (new_pmd_ptl
!= old_pmd_ptl
) && vma_is_anonymous(vma
);
1683 static pmd_t
move_soft_dirty_pmd(pmd_t pmd
)
1685 #ifdef CONFIG_MEM_SOFT_DIRTY
1686 if (unlikely(is_pmd_migration_entry(pmd
)))
1687 pmd
= pmd_swp_mksoft_dirty(pmd
);
1688 else if (pmd_present(pmd
))
1689 pmd
= pmd_mksoft_dirty(pmd
);
1694 bool move_huge_pmd(struct vm_area_struct
*vma
, unsigned long old_addr
,
1695 unsigned long new_addr
, pmd_t
*old_pmd
, pmd_t
*new_pmd
)
1697 spinlock_t
*old_ptl
, *new_ptl
;
1699 struct mm_struct
*mm
= vma
->vm_mm
;
1700 bool force_flush
= false;
1703 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
1706 if (WARN_ON(!pmd_none(*new_pmd
))) {
1707 VM_BUG_ON(pmd_trans_huge(*new_pmd
));
1712 * We don't have to worry about the ordering of src and dst
1713 * ptlocks because exclusive mmap_lock prevents deadlock.
1715 old_ptl
= __pmd_trans_huge_lock(old_pmd
, vma
);
1717 new_ptl
= pmd_lockptr(mm
, new_pmd
);
1718 if (new_ptl
!= old_ptl
)
1719 spin_lock_nested(new_ptl
, SINGLE_DEPTH_NESTING
);
1720 pmd
= pmdp_huge_get_and_clear(mm
, old_addr
, old_pmd
);
1721 if (pmd_present(pmd
))
1723 VM_BUG_ON(!pmd_none(*new_pmd
));
1725 if (pmd_move_must_withdraw(new_ptl
, old_ptl
, vma
)) {
1727 pgtable
= pgtable_trans_huge_withdraw(mm
, old_pmd
);
1728 pgtable_trans_huge_deposit(mm
, new_pmd
, pgtable
);
1730 pmd
= move_soft_dirty_pmd(pmd
);
1731 set_pmd_at(mm
, new_addr
, new_pmd
, pmd
);
1733 flush_tlb_range(vma
, old_addr
, old_addr
+ PMD_SIZE
);
1734 if (new_ptl
!= old_ptl
)
1735 spin_unlock(new_ptl
);
1736 spin_unlock(old_ptl
);
1744 * - 0 if PMD could not be locked
1745 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1746 * or if prot_numa but THP migration is not supported
1747 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1749 int change_huge_pmd(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1750 unsigned long addr
, pgprot_t newprot
, unsigned long cp_flags
)
1752 struct mm_struct
*mm
= vma
->vm_mm
;
1755 bool preserve_write
;
1757 bool prot_numa
= cp_flags
& MM_CP_PROT_NUMA
;
1758 bool uffd_wp
= cp_flags
& MM_CP_UFFD_WP
;
1759 bool uffd_wp_resolve
= cp_flags
& MM_CP_UFFD_WP_RESOLVE
;
1761 if (prot_numa
&& !thp_migration_supported())
1764 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1768 preserve_write
= prot_numa
&& pmd_write(*pmd
);
1771 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1772 if (is_swap_pmd(*pmd
)) {
1773 swp_entry_t entry
= pmd_to_swp_entry(*pmd
);
1775 VM_BUG_ON(!is_pmd_migration_entry(*pmd
));
1776 if (is_writable_migration_entry(entry
)) {
1779 * A protection check is difficult so
1780 * just be safe and disable write
1782 entry
= make_readable_migration_entry(
1784 newpmd
= swp_entry_to_pmd(entry
);
1785 if (pmd_swp_soft_dirty(*pmd
))
1786 newpmd
= pmd_swp_mksoft_dirty(newpmd
);
1787 if (pmd_swp_uffd_wp(*pmd
))
1788 newpmd
= pmd_swp_mkuffd_wp(newpmd
);
1789 set_pmd_at(mm
, addr
, pmd
, newpmd
);
1796 * Avoid trapping faults against the zero page. The read-only
1797 * data is likely to be read-cached on the local CPU and
1798 * local/remote hits to the zero page are not interesting.
1800 if (prot_numa
&& is_huge_zero_pmd(*pmd
))
1803 if (prot_numa
&& pmd_protnone(*pmd
))
1807 * In case prot_numa, we are under mmap_read_lock(mm). It's critical
1808 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1809 * which is also under mmap_read_lock(mm):
1812 * change_huge_pmd(prot_numa=1)
1813 * pmdp_huge_get_and_clear_notify()
1814 * madvise_dontneed()
1816 * pmd_trans_huge(*pmd) == 0 (without ptl)
1819 * // pmd is re-established
	 * The race makes MADV_DONTNEED miss the huge pmd and not clear it,
1822 * which may break userspace.
1824 * pmdp_invalidate() is required to make sure we don't miss
1825 * dirty/young flags set by hardware.
1827 entry
= pmdp_invalidate(vma
, addr
, pmd
);
1829 entry
= pmd_modify(entry
, newprot
);
1831 entry
= pmd_mk_savedwrite(entry
);
1833 entry
= pmd_wrprotect(entry
);
1834 entry
= pmd_mkuffd_wp(entry
);
1835 } else if (uffd_wp_resolve
) {
1837 * Leave the write bit to be handled by PF interrupt
1838 * handler, then things like COW could be properly
1841 entry
= pmd_clear_uffd_wp(entry
);
1844 set_pmd_at(mm
, addr
, pmd
, entry
);
1845 BUG_ON(vma_is_anonymous(vma
) && !preserve_write
&& pmd_write(entry
));
1852 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1854 * Note that if it returns page table lock pointer, this routine returns without
1855 * unlocking page table lock. So callers must unlock it.
1857 spinlock_t
*__pmd_trans_huge_lock(pmd_t
*pmd
, struct vm_area_struct
*vma
)
1860 ptl
= pmd_lock(vma
->vm_mm
, pmd
);
1861 if (likely(is_swap_pmd(*pmd
) || pmd_trans_huge(*pmd
) ||
1869 * Returns true if a given pud maps a thp, false otherwise.
1871 * Note that if it returns true, this routine returns without unlocking page
1872 * table lock. So callers must unlock it.
1874 spinlock_t
*__pud_trans_huge_lock(pud_t
*pud
, struct vm_area_struct
*vma
)
1878 ptl
= pud_lock(vma
->vm_mm
, pud
);
1879 if (likely(pud_trans_huge(*pud
) || pud_devmap(*pud
)))
1885 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1886 int zap_huge_pud(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1887 pud_t
*pud
, unsigned long addr
)
1891 ptl
= __pud_trans_huge_lock(pud
, vma
);
1895 * For architectures like ppc64 we look at deposited pgtable
1896 * when calling pudp_huge_get_and_clear. So do the
	 * pgtable_trans_huge_withdraw after finishing pudp related operations.
1900 pudp_huge_get_and_clear_full(tlb
->mm
, addr
, pud
, tlb
->fullmm
);
1901 tlb_remove_pud_tlb_entry(tlb
, pud
, addr
);
1902 if (vma_is_special_huge(vma
)) {
1904 /* No zero page support yet */
1906 /* No support for anonymous PUD pages yet */
1912 static void __split_huge_pud_locked(struct vm_area_struct
*vma
, pud_t
*pud
,
1913 unsigned long haddr
)
1915 VM_BUG_ON(haddr
& ~HPAGE_PUD_MASK
);
1916 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
1917 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PUD_SIZE
, vma
);
1918 VM_BUG_ON(!pud_trans_huge(*pud
) && !pud_devmap(*pud
));
1920 count_vm_event(THP_SPLIT_PUD
);
1922 pudp_huge_clear_flush_notify(vma
, haddr
, pud
);
1925 void __split_huge_pud(struct vm_area_struct
*vma
, pud_t
*pud
,
1926 unsigned long address
)
1929 struct mmu_notifier_range range
;
1931 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
, vma
->vm_mm
,
1932 address
& HPAGE_PUD_MASK
,
1933 (address
& HPAGE_PUD_MASK
) + HPAGE_PUD_SIZE
);
1934 mmu_notifier_invalidate_range_start(&range
);
1935 ptl
= pud_lock(vma
->vm_mm
, pud
);
1936 if (unlikely(!pud_trans_huge(*pud
) && !pud_devmap(*pud
)))
1938 __split_huge_pud_locked(vma
, pud
, range
.start
);
1943 * No need to double call mmu_notifier->invalidate_range() callback as
1944 * the above pudp_huge_clear_flush_notify() did already call it.
1946 mmu_notifier_invalidate_range_only_end(&range
);
1948 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1950 static void __split_huge_zero_page_pmd(struct vm_area_struct
*vma
,
1951 unsigned long haddr
, pmd_t
*pmd
)
1953 struct mm_struct
*mm
= vma
->vm_mm
;
	 * Leave pmd empty until pte is filled. Note that it is fine to delay
	 * notification until mmu_notifier_invalidate_range_end() as we are
	 * replacing a zero pmd write protected page with a zero pte write
	 * protected page.
1964 * See Documentation/vm/mmu_notifier.rst
1966 pmdp_huge_clear_flush(vma
, haddr
, pmd
);
1968 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1969 pmd_populate(mm
, &_pmd
, pgtable
);
1971 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
1973 entry
= pfn_pte(my_zero_pfn(haddr
), vma
->vm_page_prot
);
1974 entry
= pte_mkspecial(entry
);
1975 pte
= pte_offset_map(&_pmd
, haddr
);
1976 VM_BUG_ON(!pte_none(*pte
));
1977 set_pte_at(mm
, haddr
, pte
, entry
);
1980 smp_wmb(); /* make pte visible before pmd */
1981 pmd_populate(mm
, pmd
, pgtable
);
1984 static void __split_huge_pmd_locked(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1985 unsigned long haddr
, bool freeze
)
1987 struct mm_struct
*mm
= vma
->vm_mm
;
1990 pmd_t old_pmd
, _pmd
;
1991 bool young
, write
, soft_dirty
, pmd_migration
= false, uffd_wp
= false;
1995 VM_BUG_ON(haddr
& ~HPAGE_PMD_MASK
);
1996 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
1997 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PMD_SIZE
, vma
);
1998 VM_BUG_ON(!is_pmd_migration_entry(*pmd
) && !pmd_trans_huge(*pmd
)
1999 && !pmd_devmap(*pmd
));
2001 count_vm_event(THP_SPLIT_PMD
);
2003 if (!vma_is_anonymous(vma
)) {
2004 old_pmd
= pmdp_huge_clear_flush_notify(vma
, haddr
, pmd
);
2006 * We are going to unmap this huge page. So
2007 * just go ahead and zap it
2009 if (arch_needs_pgtable_deposit())
2010 zap_deposited_table(mm
, pmd
);
2011 if (vma_is_special_huge(vma
))
2013 if (unlikely(is_pmd_migration_entry(old_pmd
))) {
2016 entry
= pmd_to_swp_entry(old_pmd
);
2017 page
= pfn_swap_entry_to_page(entry
);
2019 page
= pmd_page(old_pmd
);
2020 if (!PageDirty(page
) && pmd_dirty(old_pmd
))
2021 set_page_dirty(page
);
2022 if (!PageReferenced(page
) && pmd_young(old_pmd
))
2023 SetPageReferenced(page
);
2024 page_remove_rmap(page
, true);
2027 add_mm_counter(mm
, mm_counter_file(page
), -HPAGE_PMD_NR
);
2031 if (is_huge_zero_pmd(*pmd
)) {
2033 * FIXME: Do we want to invalidate secondary mmu by calling
2034 * mmu_notifier_invalidate_range() see comments below inside
2035 * __split_huge_pmd() ?
2037 * We are going from a zero huge page write protected to zero
	 * small page also write protected so it does not seem useful
2039 * to invalidate secondary mmu at this time.
2041 return __split_huge_zero_page_pmd(vma
, haddr
, pmd
);
2045 * Up to this point the pmd is present and huge and userland has the
2046 * whole access to the hugepage during the split (which happens in
2047 * place). If we overwrite the pmd with the not-huge version pointing
2048 * to the pte here (which of course we could if all CPUs were bug
2049 * free), userland could trigger a small page size TLB miss on the
2050 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPUs don't like that.
2052 * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
	 * 383 on page 105. Intel should be safe but also warns that it's
2054 * only safe if the permission and cache attributes of the two entries
2055 * loaded in the two TLB is identical (which should be the case here).
2056 * But it is generally safer to never allow small and huge TLB entries
2057 * for the same virtual address to be loaded simultaneously. So instead
2058 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2059 * current pmd notpresent (atomically because here the pmd_trans_huge
2060 * must remain set at all times on the pmd until the split is complete
2061 * for this pmd), then we flush the SMP TLB and finally we write the
2062 * non-huge version of the pmd entry with pmd_populate.
2064 old_pmd
= pmdp_invalidate(vma
, haddr
, pmd
);
2066 pmd_migration
= is_pmd_migration_entry(old_pmd
);
2067 if (unlikely(pmd_migration
)) {
2070 entry
= pmd_to_swp_entry(old_pmd
);
2071 page
= pfn_swap_entry_to_page(entry
);
2072 write
= is_writable_migration_entry(entry
);
2074 soft_dirty
= pmd_swp_soft_dirty(old_pmd
);
2075 uffd_wp
= pmd_swp_uffd_wp(old_pmd
);
2077 page
= pmd_page(old_pmd
);
2078 if (pmd_dirty(old_pmd
))
2080 write
= pmd_write(old_pmd
);
2081 young
= pmd_young(old_pmd
);
2082 soft_dirty
= pmd_soft_dirty(old_pmd
);
2083 uffd_wp
= pmd_uffd_wp(old_pmd
);
2085 VM_BUG_ON_PAGE(!page_count(page
), page
);
2086 page_ref_add(page
, HPAGE_PMD_NR
- 1);
2089 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
2092 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
2093 pmd_populate(mm
, &_pmd
, pgtable
);
2095 for (i
= 0, addr
= haddr
; i
< HPAGE_PMD_NR
; i
++, addr
+= PAGE_SIZE
) {
2098 * Note that NUMA hinting access restrictions are not
2099 * transferred to avoid any possibility of altering
2100 * permissions across VMAs.
2102 if (freeze
|| pmd_migration
) {
2103 swp_entry_t swp_entry
;
2105 swp_entry
= make_writable_migration_entry(
2106 page_to_pfn(page
+ i
));
2108 swp_entry
= make_readable_migration_entry(
2109 page_to_pfn(page
+ i
));
2110 entry
= swp_entry_to_pte(swp_entry
);
2112 entry
= pte_swp_mksoft_dirty(entry
);
2114 entry
= pte_swp_mkuffd_wp(entry
);
2116 entry
= mk_pte(page
+ i
, READ_ONCE(vma
->vm_page_prot
));
2117 entry
= maybe_mkwrite(entry
, vma
);
2119 entry
= pte_wrprotect(entry
);
2121 entry
= pte_mkold(entry
);
2123 entry
= pte_mksoft_dirty(entry
);
2125 entry
= pte_mkuffd_wp(entry
);
2127 pte
= pte_offset_map(&_pmd
, addr
);
2128 BUG_ON(!pte_none(*pte
));
2129 set_pte_at(mm
, addr
, pte
, entry
);
2131 atomic_inc(&page
[i
]._mapcount
);
2135 if (!pmd_migration
) {
2137 * Set PG_double_map before dropping compound_mapcount to avoid
2138 * false-negative page_mapped().
2140 if (compound_mapcount(page
) > 1 &&
2141 !TestSetPageDoubleMap(page
)) {
2142 for (i
= 0; i
< HPAGE_PMD_NR
; i
++)
2143 atomic_inc(&page
[i
]._mapcount
);
2146 lock_page_memcg(page
);
2147 if (atomic_add_negative(-1, compound_mapcount_ptr(page
))) {
2148 /* Last compound_mapcount is gone. */
2149 __mod_lruvec_page_state(page
, NR_ANON_THPS
,
2151 if (TestClearPageDoubleMap(page
)) {
2152 /* No need in mapcount reference anymore */
2153 for (i
= 0; i
< HPAGE_PMD_NR
; i
++)
2154 atomic_dec(&page
[i
]._mapcount
);
2157 unlock_page_memcg(page
);
2160 smp_wmb(); /* make pte visible before pmd */
2161 pmd_populate(mm
, pmd
, pgtable
);
2164 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
2165 page_remove_rmap(page
+ i
, false);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;
	bool do_unlock_page = false;
	pmd_t _pmd;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pmd_lock(vma->vm_mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page) {
		VM_WARN_ON_ONCE(!PageLocked(page));
		if (page != pmd_page(*pmd))
			goto out;
	}

repeat:
	if (pmd_trans_huge(*pmd)) {
		if (!page) {
			page = pmd_page(*pmd);
			/*
			 * An anonymous page must be locked, to ensure that a
			 * concurrent reuse_swap_page() sees stable mapcount;
			 * but reuse_swap_page() is not used on shmem or file,
			 * and page lock must not be taken when zap_pmd_range()
			 * calls __split_huge_pmd() while i_mmap_lock is held.
			 */
			if (PageAnon(page)) {
				if (unlikely(!trylock_page(page))) {
					get_page(page);
					_pmd = *pmd;
					spin_unlock(ptl);
					lock_page(page);
					spin_lock(ptl);
					if (unlikely(!pmd_same(*pmd, _pmd))) {
						unlock_page(page);
						put_page(page);
						page = NULL;
						goto repeat;
					}
					put_page(page);
				}
				do_unlock_page = true;
			}
		}
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
	spin_unlock(ptl);
	if (do_unlock_page)
		unlock_page(page);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback.
	 * There are 3 cases to consider inside __split_huge_pmd_locked():
	 * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(): obvious
	 * 2) __split_huge_zero_page_pmd() splits the read-only zero page; any
	 *    write fault will trigger a flush_notify before pointing to a new
	 *    page (it is fine if the secondary mmu keeps pointing to the old
	 *    zero page in the meantime)
	 * 3) Splitting a huge pmd into ptes pointing to the same page. No need
	 *    to invalidate the secondary tlb entries; they are all still
	 *    valid. Any further changes to individual ptes will notify. So no
	 *    need to call mmu_notifier->invalidate_range()
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}
static inline void split_huge_pmd_if_needed(struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * If the new address isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (!IS_ALIGNED(address, HPAGE_PMD_SIZE) &&
	    range_in_vma(vma, ALIGN_DOWN(address, HPAGE_PMD_SIZE),
			 ALIGN(address, HPAGE_PMD_SIZE)))
		split_huge_pmd_address(vma, address, false, NULL);
}
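/*
 * Illustrative example for the helper above (the address values are made up;
 * sizes assume 2MB huge pages as on x86-64): a call with address
 * 0x7f0000201000 sees an address that is not 2MB aligned, and if the
 * surrounding range [0x7f0000200000, 0x7f0000400000) lies entirely inside the
 * VMA, the PMD covering that range is split back to ptes via
 * split_huge_pmd_address().
 */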
void vma_adjust_trans_huge(struct vm_area_struct *vma,
			   unsigned long start,
			   unsigned long end,
			   long adjust_next)
{
	/* Check if we need to split start first. */
	split_huge_pmd_if_needed(vma, start);

	/* Check if we need to split end next. */
	split_huge_pmd_if_needed(vma, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start,
	 * check if we need to split it.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next;
		split_huge_pmd_if_needed(next, nstart);
	}
}
static void unmap_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
		TTU_SYNC;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	/*
	 * Anon pages need migration entries to preserve them, but file
	 * pages can simply be left unmapped, then faulted back on demand.
	 * If that is ever changed (perhaps for mlock), update remap_page().
	 */
	if (PageAnon(page))
		try_to_migrate(page, ttu_flags);
	else
		try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);

	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}
static void remap_page(struct page *page, unsigned int nr)
{
	int i;

	/* If unmap_page() uses try_to_migrate() on file, remove this check */
	if (!PageAnon(page))
		return;
	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < nr; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}
static void lru_add_page_tail(struct page *head, struct page *tail,
		struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(head), head);
	VM_BUG_ON_PAGE(PageCompound(tail), head);
	VM_BUG_ON_PAGE(PageLRU(tail), head);
	lockdep_assert_held(&lruvec->lru_lock);

	if (list) {
		/* page reclaim is reclaiming a huge page */
		VM_WARN_ON(PageLRU(head));
		get_page(tail);
		list_add_tail(&tail->lru, list);
	} else {
		/* head is still on lru (and we have it frozen) */
		VM_WARN_ON(!PageLRU(head));
		SetPageLRU(tail);
		list_add_tail(&tail->lru, &head->lru);
	}
}
static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After successful get_page_unless_zero() might follow flags change,
	 * for example lock_page() which set PG_waiters.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
#ifdef CONFIG_64BIT
			 (1L << PG_arch_2) |
#endif
			 (1L << PG_dirty)));

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After successful get_page_unless_zero() might follow put_page()
	 * which needs correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(head, page_tail, lruvec, list);
}
static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end)
{
	struct page *head = compound_head(page);
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	unsigned int nr = thp_nr_pages(head);
	int i;

	/* complete memcg works before add pages to LRU */
	split_page_memcg(head, nr);

	if (PageAnon(head) && PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		offset = swp_offset(entry);
		swap_cache = swap_address_space(entry);
		xa_lock(&swap_cache->i_pages);
	}

	/* lock lru list/PageCompound, ref frozen by page_ref_freeze */
	lruvec = lock_page_lruvec(head);

	for (i = nr - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		} else if (!PageAnon(page)) {
			__xa_store(&head->mapping->i_pages, head[i].index,
					head + i, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
		}
	}

	ClearPageCompound(head);
	unlock_page_lruvec(lruvec);
	/* Caller disabled irqs, so they are still disabled here */

	split_page_owner(head, nr);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to swap cache */
		if (PageSwapCache(head)) {
			page_ref_add(head, 2);
			xa_unlock(&swap_cache->i_pages);
		} else {
			page_ref_inc(head);
		}
	} else {
		/* Additional pin to page cache */
		page_ref_add(head, 2);
		xa_unlock(&head->mapping->i_pages);
	}
	local_irq_enable();

	remap_page(head, nr);

	if (PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		split_swap_cluster(entry);
	}

	for (i = 0; i < nr; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}
int total_mapcount(struct page *page)
{
	int i, compound, nr, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	nr = compound_nr(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < nr; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * nr;
	if (PageDoubleMap(page))
		ret -= nr;
	return ret;
}
/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying them. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
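/*
 * A worked example (illustrative): take a PTE-mapped anonymous THP with no
 * remaining PMD mapping and PG_double_map clear, where process A maps only
 * subpage 0 and process B maps only subpage 1. The highest per-subpage
 * mapcount is 1 and compound_mapcount is 0, so the function below returns 1
 * while *total_mapcount is set to 2: each process can reuse "its" subpage on
 * a write fault even though the THP as a whole is mapped twice.
 */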
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < thp_nr_pages(page); i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= thp_nr_pages(page);
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}
/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from page cache */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? thp_nr_pages(page) : 0;
	else
		extra_pins = thp_nr_pages(page);
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}
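/*
 * Pin accounting sketch (illustrative): for an anonymous THP that is not in
 * the swap cache and is mapped by a single PMD, total_mapcount() is 1 and
 * extra_pins is 0, so the check above expects page_count() == 2: one
 * reference from the mapping plus the caller's own pin. Any additional
 * reference (e.g. a GUP pin) makes the counts disagree and the split is
 * refused.
 */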
/*
 * This function splits a huge page into normal pages. @page can point to any
 * subpage of the huge page to split. Split doesn't change the position of
 * @page.
 *
 * The caller must hold a pin on the @page, otherwise the split fails with
 * -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise to
 * @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on from
 * the hugepage.
 *
 * GUP pin and PG_locked are transferred to @page. The rest of the subpages
 * can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct deferred_split *ds_queue = get_deferred_split_queue(head);
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int extra_pins, ret;
	pgoff_t end;

	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
	VM_BUG_ON_PAGE(!PageLocked(head), head);
	VM_BUG_ON_PAGE(!PageCompound(head), head);

	if (PageWriteback(head))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_lock that would
		 * prevent the anon_vma disappearing so first we take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 * __split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * head page lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
	}

	/*
	 * Racy check if we can split the page, before unmap_page() will
	 * split PMDs
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	unmap_page(head);

	/* block interrupt reentry in xa_lock and spinlock */
	local_irq_disable();
	if (mapping) {
		XA_STATE(xas, &mapping->i_pages, page_index(head));

		/*
		 * Check if the head page is present in page cache.
		 * We assume all tail pages are present too, if the head is
		 * there.
		 */
		xa_lock(&mapping->i_pages);
		if (xas_load(&xas) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&ds_queue->split_queue_lock);
	if (page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			ds_queue->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		spin_unlock(&ds_queue->split_queue_lock);
		if (mapping) {
			int nr = thp_nr_pages(head);

			if (PageSwapBacked(head))
				__mod_lruvec_page_state(head, NR_SHMEM_THPS,
							-nr);
			else
				__mod_lruvec_page_state(head, NR_FILE_THPS,
							-nr);
		}

		__split_huge_page(page, list, end);
		ret = 0;
	} else {
		spin_unlock(&ds_queue->split_queue_lock);
fail:
		if (mapping)
			xa_unlock(&mapping->i_pages);
		local_irq_enable();
		remap_page(head, thp_nr_pages(head));
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}
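/*
 * Typical caller pattern (an illustrative sketch, mirroring the shrinker and
 * debugfs code later in this file): with a reference already held, the caller
 * locks the page and uses the split_huge_page() wrapper, which calls
 * split_huge_page_to_list() with a NULL list so the tail pages go back to the
 * LRU:
 *
 *	if (trylock_page(page)) {
 *		if (!split_huge_page(page))
 *			split++;
 *		unlock_page(page);
 *	}
 *	put_page(page);
 */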
void free_transhuge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		ds_queue->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	free_compound_page(page);
}
void deferred_split_huge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = page_memcg(compound_head(page));
#endif
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	/*
	 * The try_to_unmap() in the page reclaim path might reach here too;
	 * this may cause a race condition that corrupts the deferred split
	 * queue. And, if page reclaim is already handling the same page, it
	 * is unnecessary to handle it again in the shrinker.
	 *
	 * Check PageSwapCache to determine if the page is being
	 * handled by page reclaim since THP swap would add the page into
	 * swap cache before calling try_to_unmap().
	 */
	if (PageSwapCache(page))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			set_shrinker_bit(memcg, page_to_nid(page),
					 deferred_split_shrinker.id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &ds_queue->split_queue) {
		page = list_entry((void *)pos, struct page, deferred_list);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, deferred_list);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}
static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
		 SHRINKER_NONSLAB,
};
#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	pr_debug("Split all THPs\n");
	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_debug("%lu of %lu THP split\n", split, total);
}
static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma)
{
	return vma_is_special_huge(vma) || (vma->vm_flags & VM_IO) ||
	       is_vm_hugetlb_page(vma);
}
static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
				unsigned long vaddr_end)
{
	int ret = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	unsigned long total = 0, split = 0;
	unsigned long addr;

	vaddr_start &= PAGE_MASK;
	vaddr_end &= PAGE_MASK;

	/* Find the task_struct from pid */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (!task) {
		rcu_read_unlock();
		ret = -ESRCH;
		goto out;
	}
	get_task_struct(task);
	rcu_read_unlock();

	/* Find the mm_struct */
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Split huge pages in pid: %d, vaddr: [0x%lx - 0x%lx]\n",
		 pid, vaddr_start, vaddr_end);

	mmap_read_lock(mm);
	/*
	 * always increase addr by PAGE_SIZE, since we could have a PTE page
	 * table filled with PTE-mapped THPs, each of which is distinct.
	 */
	for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
		struct vm_area_struct *vma = find_vma(mm, addr);
		unsigned int follflags;
		struct page *page;

		if (!vma || addr < vma->vm_start)
			break;

		/* skip special VMA and hugetlb VMA */
		if (vma_not_suitable_for_thp_split(vma)) {
			addr = vma->vm_end;
			continue;
		}

		/* FOLL_DUMP to ignore special (like zero) pages */
		follflags = FOLL_GET | FOLL_DUMP;
		page = follow_page(vma, addr, follflags);

		if (IS_ERR(page))
			continue;
		if (!page)
			continue;

		if (!is_transparent_hugepage(page))
			goto next;

		total++;
		if (!can_split_huge_page(compound_head(page), NULL))
			goto next;

		if (!trylock_page(page))
			goto next;

		if (!split_huge_page(page))
			split++;

		unlock_page(page);
next:
		put_page(page);
		cond_resched();
	}
	mmap_read_unlock(mm);
	mmput(mm);

	pr_debug("%lu of %lu THP split\n", split, total);

out:
	return ret;
}
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
				pgoff_t off_end)
{
	struct filename *file;
	struct file *candidate;
	struct address_space *mapping;
	int ret = -EINVAL;
	pgoff_t index;
	int nr_pages = 1;
	unsigned long total = 0, split = 0;

	file = getname_kernel(file_path);
	if (IS_ERR(file))
		return ret;

	candidate = file_open_name(file, O_RDONLY, 0);
	if (IS_ERR(candidate))
		goto out;

	pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
		 file_path, off_start, off_end);

	mapping = candidate->f_mapping;

	for (index = off_start; index < off_end; index += nr_pages) {
		struct page *fpage = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

		nr_pages = 1;
		if (xa_is_value(fpage) || !fpage)
			continue;

		if (!is_transparent_hugepage(fpage))
			goto next;

		total++;
		nr_pages = thp_nr_pages(fpage);

		if (!trylock_page(fpage))
			goto next;

		if (!split_huge_page(fpage))
			split++;

		unlock_page(fpage);
next:
		put_page(fpage);
		cond_resched();
	}

	filp_close(candidate, NULL);
	ret = 0;

	pr_debug("%lu of %lu file-backed THP split\n", split, total);
out:
	putname(file);
	return ret;
}
#define MAX_INPUT_BUF_SZ 255
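/*
 * Usage sketch for the debugfs write handler below (the pid, addresses and
 * file path are made-up examples):
 *
 *   echo 1 > /sys/kernel/debug/split_huge_pages
 *	split all THPs in the system (split_huge_pages_all()).
 *
 *   echo "1234,0x7f0000000000,0x7f0000400000" > /sys/kernel/debug/split_huge_pages
 *	split the THPs mapped in that virtual address range of pid 1234
 *	(split_huge_pages_pid()).
 *
 *   echo "/mnt/data/somefile,0x0,0x200" > /sys/kernel/debug/split_huge_pages
 *	split the file-backed THPs in that page offset range of the named
 *	file (split_huge_pages_in_file()).
 */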
static ssize_t split_huge_pages_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppops)
{
	static DEFINE_MUTEX(split_debug_mutex);
	ssize_t ret;
	/* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */
	char input_buf[MAX_INPUT_BUF_SZ];
	int pid;
	unsigned long vaddr_start, vaddr_end;

	ret = mutex_lock_interruptible(&split_debug_mutex);
	if (ret)
		return ret;

	ret = -EFAULT;

	memset(input_buf, 0, MAX_INPUT_BUF_SZ);
	if (copy_from_user(input_buf, buf, min_t(size_t, count, MAX_INPUT_BUF_SZ)))
		goto out;

	input_buf[MAX_INPUT_BUF_SZ - 1] = '\0';

	if (input_buf[0] == '/') {
		char *tok;
		char *buf = input_buf;
		char file_path[MAX_INPUT_BUF_SZ];
		pgoff_t off_start = 0, off_end = 0;
		size_t input_len = strlen(input_buf);

		tok = strsep(&buf, ",");
		if (tok) {
			strcpy(file_path, tok);
		} else {
			ret = -EINVAL;
			goto out;
		}

		ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end);
		if (ret != 2) {
			ret = -EINVAL;
			goto out;
		}
		ret = split_huge_pages_in_file(file_path, off_start, off_end);
		if (!ret)
			ret = input_len;

		goto out;
	}

	ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end);
	if (ret == 1 && pid == 1) {
		split_huge_pages_all();
		ret = strlen(input_buf);
		goto out;
	} else if (ret != 3) {
		ret = -EINVAL;
		goto out;
	}

	ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end);
	if (!ret)
		ret = strlen(input_buf);
out:
	mutex_unlock(&split_debug_mutex);
	return ret;
}
static const struct file_operations split_huge_pages_fops = {
	.owner	 = THIS_MODULE,
	.write	 = split_huge_pages_write,
	.llseek  = no_llseek,
};

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	if (pmd_write(pmdval))
		entry = make_writable_migration_entry(page_to_pfn(page));
	else
		entry = make_readable_migration_entry(page_to_pfn(page));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);
}
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_writable_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);
	if (pmd_swp_uffd_wp(*pvmw->pmd))
		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	if (PageAnon(new))
		page_add_anon_rmap(new, vma, mmun_start, true);
	else
		page_add_file_rmap(new, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif