// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2009  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/numa_balancing.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
#include <linux/numa.h>
#include <linux/page_owner.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications that are not
 * guaranteed to benefit from it. When transparent hugepage support is
 * enabled, it is for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
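
/*
 * The huge zero page is allocated lazily on first use and freed again by
 * huge_zero_page_shrinker under memory pressure. huge_zero_refcount is set
 * to 2 on allocation: one reference is handed to the caller, the other is
 * kept so the shrinker only drops the page once no mm is still using it
 * (see get_huge_zero_page() and shrink_huge_zero_page_scan() below).
 */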
bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/* The addr is used to check if the vma size fits */
	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;

	if (!transhuge_vma_suitable(vma, addr))
		return false;
	if (vma_is_anonymous(vma))
		return __transparent_hugepage_enabled(vma);
	if (vma_is_shmem(vma))
		return shmem_huge_enabled(vma);

	return false;
}
static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}
static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
struct page *mm_get_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_page);

	if (!get_huge_zero_page())
		return NULL;

	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_page);
}
void mm_put_huge_zero_page(struct mm_struct *mm)
{
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();
}
static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}
static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
#ifdef CONFIG_SYSFS
static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] madvise never\n");
	else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}

static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		ret = -EINVAL;

	if (ret > 0) {
		int err = start_stop_khugepaged();
		if (err)
			ret = err;
	}
	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
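
/*
 * Illustrative usage (not part of this file): the attribute defined above is
 * exposed through sysfs, so the policy can be switched at run time, e.g.
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * Reads report the current selection in brackets, as formatted by
 * enabled_show() above.
 */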
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "[always] defer defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always [defer] defer+madvise madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer [defer+madvise] madvise never\n");
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return sprintf(buf, "always defer defer+madvise [madvise] never\n");
	return sprintf(buf, "always defer defer+madvise madvise [never]\n");
}

static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	if (sysfs_streq(buf, "always")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer+madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "defer")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else if (sysfs_streq(buf, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);
static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);

static ssize_t hpage_pmd_size_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", HPAGE_PMD_SIZE);
}
static struct kobj_attribute hpage_pmd_size_attr =
	__ATTR_RO(hpage_pmd_size);
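
/*
 * Illustrative usage (value shown assumes x86-64 with 4KB base pages):
 * the read-only attribute above reports the PMD hugepage size in bytes:
 *
 *	$ cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
 *	2097152
 *
 * The actual value is architecture dependent (HPAGE_PMD_SIZE).
 */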
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
	&hpage_pmd_size_attr.attr,
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
	&shmem_enabled_attr.attr,
#endif
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	/*
	 * hugepages can't be allocated by the buddy allocator
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER >= MAX_ORDER);
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */
	MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save.  The admin can still enable it through /sys.
	 */
	if (totalram_pages() < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_destroy();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);
static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;

	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
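
/*
 * Illustrative usage: the boot-time equivalent of the sysfs "enabled" knob,
 * parsed by setup_transparent_hugepage() above, e.g. on the kernel command
 * line:
 *
 *	transparent_hugepage=madvise
 */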
pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}
#ifdef CONFIG_MEMCG
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	if (memcg)
		return &memcg->deferred_split_queue;
	else
		return &pgdat->deferred_split_queue;
}
#else
static inline struct deferred_split *get_deferred_split_queue(struct page *page)
{
	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));

	return &pgdat->deferred_split_queue;
}
#endif
void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in second tail page
	 * as list_head: assuming THP order >= 2
	 */

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}
static unsigned long __thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len,
		loff_t off, unsigned long flags, unsigned long size)
{
	loff_t off_end = off + len;
	loff_t off_align = round_up(off, size);
	unsigned long len_pad, ret;

	if (off_end <= off_align || (off_end - off_align) < size)
		return 0;

	len_pad = len + size;
	if (len_pad < len || (off + len_pad) < off)
		return 0;

	ret = current->mm->get_unmapped_area(filp, addr, len_pad,
					      off >> PAGE_SHIFT, flags);

	/*
	 * The failure might be due to length padding. The caller will retry
	 * without the padding.
	 */
	if (IS_ERR_VALUE(ret))
		return 0;

	/*
	 * Do not try to align to THP boundary if allocation at the address
	 * hint succeeds.
	 */
	if (ret == addr)
		return addr;

	ret += (off - ret) & (size - 1);
	return ret;
}
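
/*
 * Worked example (illustrative numbers only): with size = 2MB, a request of
 * len = 6MB at file offset off = 1MB gives off_align = 2MB and len_pad = 8MB.
 * If get_unmapped_area() returns a 2MB-aligned base, "(off - ret) & (size - 1)"
 * is 1MB, so the final address is base + 1MB. The mapping then starts at the
 * same offset within a 2MB block as the file offset does, which lets PMD-sized
 * blocks of the file land on PMD-aligned virtual addresses.
 */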
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long ret;
	loff_t off = (loff_t)pgoff << PAGE_SHIFT;

	if (!IS_DAX(filp->f_mapping->host) || !IS_ENABLED(CONFIG_FS_DAX_PMD))
		goto out;

	ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE);
	if (ret)
		return ret;
out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
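
/*
 * Illustrative only: a DAX-capable filesystem opts in to the PMD-aligned
 * placement above by wiring the exported helper into its file_operations.
 * "foo" is a made-up name, not an API defined in this file:
 *
 *	static const struct file_operations foo_file_operations = {
 *		.mmap			= foo_file_mmap,
 *		.get_unmapped_area	= thp_get_unmapped_area,
 *	};
 */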
578 static vm_fault_t
__do_huge_pmd_anonymous_page(struct vm_fault
*vmf
,
579 struct page
*page
, gfp_t gfp
)
581 struct vm_area_struct
*vma
= vmf
->vma
;
582 struct mem_cgroup
*memcg
;
584 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
587 VM_BUG_ON_PAGE(!PageCompound(page
), page
);
589 if (mem_cgroup_try_charge_delay(page
, vma
->vm_mm
, gfp
, &memcg
, true)) {
591 count_vm_event(THP_FAULT_FALLBACK
);
592 return VM_FAULT_FALLBACK
;
595 pgtable
= pte_alloc_one(vma
->vm_mm
);
596 if (unlikely(!pgtable
)) {
601 clear_huge_page(page
, vmf
->address
, HPAGE_PMD_NR
);
603 * The memory barrier inside __SetPageUptodate makes sure that
604 * clear_huge_page writes become visible before the set_pmd_at()
607 __SetPageUptodate(page
);
609 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
610 if (unlikely(!pmd_none(*vmf
->pmd
))) {
615 ret
= check_stable_address_space(vma
->vm_mm
);
619 /* Deliver the page fault to userland */
620 if (userfaultfd_missing(vma
)) {
623 spin_unlock(vmf
->ptl
);
624 mem_cgroup_cancel_charge(page
, memcg
, true);
626 pte_free(vma
->vm_mm
, pgtable
);
627 ret2
= handle_userfault(vmf
, VM_UFFD_MISSING
);
628 VM_BUG_ON(ret2
& VM_FAULT_FALLBACK
);
632 entry
= mk_huge_pmd(page
, vma
->vm_page_prot
);
633 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
634 page_add_new_anon_rmap(page
, vma
, haddr
, true);
635 mem_cgroup_commit_charge(page
, memcg
, false, true);
636 lru_cache_add_active_or_unevictable(page
, vma
);
637 pgtable_trans_huge_deposit(vma
->vm_mm
, vmf
->pmd
, pgtable
);
638 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
639 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
640 mm_inc_nr_ptes(vma
->vm_mm
);
641 spin_unlock(vmf
->ptl
);
642 count_vm_event(THP_FAULT_ALLOC
);
643 count_memcg_events(memcg
, THP_FAULT_ALLOC
, 1);
648 spin_unlock(vmf
->ptl
);
651 pte_free(vma
->vm_mm
, pgtable
);
652 mem_cgroup_cancel_charge(page
, memcg
, true);
/*
 * always: directly stall for all thp allocations
 * defer: wake kswapd and fail if not immediately available
 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
 *		  fail if not immediately available
 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
 *	    available
 * never: never stall for any thp allocation
 */
static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);

	/* Always do synchronous compaction */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);

	/* Kick kcompactd and fail quickly */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;

	/* Synchronous compaction if madvised, otherwise kick kcompactd */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM :
					__GFP_KSWAPD_RECLAIM);

	/* Only do synchronous compaction if madvised */
	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);

	return GFP_TRANSHUGE_LIGHT;
}
/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;

	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	if (pgtable)
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	mm_inc_nr_ptes(mm);
	return true;
}
710 vm_fault_t
do_huge_pmd_anonymous_page(struct vm_fault
*vmf
)
712 struct vm_area_struct
*vma
= vmf
->vma
;
715 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
717 if (!transhuge_vma_suitable(vma
, haddr
))
718 return VM_FAULT_FALLBACK
;
719 if (unlikely(anon_vma_prepare(vma
)))
721 if (unlikely(khugepaged_enter(vma
, vma
->vm_flags
)))
723 if (!(vmf
->flags
& FAULT_FLAG_WRITE
) &&
724 !mm_forbids_zeropage(vma
->vm_mm
) &&
725 transparent_hugepage_use_zero_page()) {
727 struct page
*zero_page
;
729 pgtable
= pte_alloc_one(vma
->vm_mm
);
730 if (unlikely(!pgtable
))
732 zero_page
= mm_get_huge_zero_page(vma
->vm_mm
);
733 if (unlikely(!zero_page
)) {
734 pte_free(vma
->vm_mm
, pgtable
);
735 count_vm_event(THP_FAULT_FALLBACK
);
736 return VM_FAULT_FALLBACK
;
738 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
740 if (pmd_none(*vmf
->pmd
)) {
741 ret
= check_stable_address_space(vma
->vm_mm
);
743 spin_unlock(vmf
->ptl
);
744 pte_free(vma
->vm_mm
, pgtable
);
745 } else if (userfaultfd_missing(vma
)) {
746 spin_unlock(vmf
->ptl
);
747 pte_free(vma
->vm_mm
, pgtable
);
748 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
749 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
751 set_huge_zero_page(pgtable
, vma
->vm_mm
, vma
,
752 haddr
, vmf
->pmd
, zero_page
);
753 spin_unlock(vmf
->ptl
);
756 spin_unlock(vmf
->ptl
);
757 pte_free(vma
->vm_mm
, pgtable
);
761 gfp
= alloc_hugepage_direct_gfpmask(vma
);
762 page
= alloc_hugepage_vma(gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
763 if (unlikely(!page
)) {
764 count_vm_event(THP_FAULT_FALLBACK
);
765 return VM_FAULT_FALLBACK
;
767 prep_transhuge_page(page
);
768 return __do_huge_pmd_anonymous_page(vmf
, page
, gfp
);
771 static void insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
772 pmd_t
*pmd
, pfn_t pfn
, pgprot_t prot
, bool write
,
775 struct mm_struct
*mm
= vma
->vm_mm
;
779 ptl
= pmd_lock(mm
, pmd
);
780 if (!pmd_none(*pmd
)) {
782 if (pmd_pfn(*pmd
) != pfn_t_to_pfn(pfn
)) {
783 WARN_ON_ONCE(!is_huge_zero_pmd(*pmd
));
786 entry
= pmd_mkyoung(*pmd
);
787 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
788 if (pmdp_set_access_flags(vma
, addr
, pmd
, entry
, 1))
789 update_mmu_cache_pmd(vma
, addr
, pmd
);
795 entry
= pmd_mkhuge(pfn_t_pmd(pfn
, prot
));
796 if (pfn_t_devmap(pfn
))
797 entry
= pmd_mkdevmap(entry
);
799 entry
= pmd_mkyoung(pmd_mkdirty(entry
));
800 entry
= maybe_pmd_mkwrite(entry
, vma
);
804 pgtable_trans_huge_deposit(mm
, pmd
, pgtable
);
809 set_pmd_at(mm
, addr
, pmd
, entry
);
810 update_mmu_cache_pmd(vma
, addr
, pmd
);
815 pte_free(mm
, pgtable
);
818 vm_fault_t
vmf_insert_pfn_pmd(struct vm_fault
*vmf
, pfn_t pfn
, bool write
)
820 unsigned long addr
= vmf
->address
& PMD_MASK
;
821 struct vm_area_struct
*vma
= vmf
->vma
;
822 pgprot_t pgprot
= vma
->vm_page_prot
;
823 pgtable_t pgtable
= NULL
;
826 * If we had pmd_special, we could avoid all these restrictions,
827 * but we need to be consistent with PTEs and architectures that
828 * can't support a 'special' bit.
830 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) &&
832 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
833 (VM_PFNMAP
|VM_MIXEDMAP
));
834 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
836 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
837 return VM_FAULT_SIGBUS
;
839 if (arch_needs_pgtable_deposit()) {
840 pgtable
= pte_alloc_one(vma
->vm_mm
);
845 track_pfn_insert(vma
, &pgprot
, pfn
);
847 insert_pfn_pmd(vma
, addr
, vmf
->pmd
, pfn
, pgprot
, write
, pgtable
);
848 return VM_FAULT_NOPAGE
;
850 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd
);
852 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
853 static pud_t
maybe_pud_mkwrite(pud_t pud
, struct vm_area_struct
*vma
)
855 if (likely(vma
->vm_flags
& VM_WRITE
))
856 pud
= pud_mkwrite(pud
);
860 static void insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
861 pud_t
*pud
, pfn_t pfn
, pgprot_t prot
, bool write
)
863 struct mm_struct
*mm
= vma
->vm_mm
;
867 ptl
= pud_lock(mm
, pud
);
868 if (!pud_none(*pud
)) {
870 if (pud_pfn(*pud
) != pfn_t_to_pfn(pfn
)) {
871 WARN_ON_ONCE(!is_huge_zero_pud(*pud
));
874 entry
= pud_mkyoung(*pud
);
875 entry
= maybe_pud_mkwrite(pud_mkdirty(entry
), vma
);
876 if (pudp_set_access_flags(vma
, addr
, pud
, entry
, 1))
877 update_mmu_cache_pud(vma
, addr
, pud
);
882 entry
= pud_mkhuge(pfn_t_pud(pfn
, prot
));
883 if (pfn_t_devmap(pfn
))
884 entry
= pud_mkdevmap(entry
);
886 entry
= pud_mkyoung(pud_mkdirty(entry
));
887 entry
= maybe_pud_mkwrite(entry
, vma
);
889 set_pud_at(mm
, addr
, pud
, entry
);
890 update_mmu_cache_pud(vma
, addr
, pud
);
896 vm_fault_t
vmf_insert_pfn_pud(struct vm_fault
*vmf
, pfn_t pfn
, bool write
)
898 unsigned long addr
= vmf
->address
& PUD_MASK
;
899 struct vm_area_struct
*vma
= vmf
->vma
;
900 pgprot_t pgprot
= vma
->vm_page_prot
;
903 * If we had pud_special, we could avoid all these restrictions,
904 * but we need to be consistent with PTEs and architectures that
905 * can't support a 'special' bit.
907 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) &&
909 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
910 (VM_PFNMAP
|VM_MIXEDMAP
));
911 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
913 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
914 return VM_FAULT_SIGBUS
;
916 track_pfn_insert(vma
, &pgprot
, pfn
);
918 insert_pfn_pud(vma
, addr
, vmf
->pud
, pfn
, pgprot
, write
);
919 return VM_FAULT_NOPAGE
;
921 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud
);
922 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
924 static void touch_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
925 pmd_t
*pmd
, int flags
)
929 _pmd
= pmd_mkyoung(*pmd
);
930 if (flags
& FOLL_WRITE
)
931 _pmd
= pmd_mkdirty(_pmd
);
932 if (pmdp_set_access_flags(vma
, addr
& HPAGE_PMD_MASK
,
933 pmd
, _pmd
, flags
& FOLL_WRITE
))
934 update_mmu_cache_pmd(vma
, addr
, pmd
);
937 struct page
*follow_devmap_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
938 pmd_t
*pmd
, int flags
, struct dev_pagemap
**pgmap
)
940 unsigned long pfn
= pmd_pfn(*pmd
);
941 struct mm_struct
*mm
= vma
->vm_mm
;
944 assert_spin_locked(pmd_lockptr(mm
, pmd
));
947 * When we COW a devmap PMD entry, we split it into PTEs, so we should
948 * not be in this function with `flags & FOLL_COW` set.
950 WARN_ONCE(flags
& FOLL_COW
, "mm: In follow_devmap_pmd with FOLL_COW set");
952 if (flags
& FOLL_WRITE
&& !pmd_write(*pmd
))
955 if (pmd_present(*pmd
) && pmd_devmap(*pmd
))
960 if (flags
& FOLL_TOUCH
)
961 touch_pmd(vma
, addr
, pmd
, flags
);
964 * device mapped pages can only be returned if the
965 * caller will manage the page reference count.
967 if (!(flags
& FOLL_GET
))
968 return ERR_PTR(-EEXIST
);
970 pfn
+= (addr
& ~PMD_MASK
) >> PAGE_SHIFT
;
971 *pgmap
= get_dev_pagemap(pfn
, *pgmap
);
973 return ERR_PTR(-EFAULT
);
974 page
= pfn_to_page(pfn
);
980 int copy_huge_pmd(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
981 pmd_t
*dst_pmd
, pmd_t
*src_pmd
, unsigned long addr
,
982 struct vm_area_struct
*vma
)
984 spinlock_t
*dst_ptl
, *src_ptl
;
985 struct page
*src_page
;
987 pgtable_t pgtable
= NULL
;
	/* Skip if it can be re-filled on fault */
991 if (!vma_is_anonymous(vma
))
994 pgtable
= pte_alloc_one(dst_mm
);
995 if (unlikely(!pgtable
))
998 dst_ptl
= pmd_lock(dst_mm
, dst_pmd
);
999 src_ptl
= pmd_lockptr(src_mm
, src_pmd
);
1000 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1005 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1006 if (unlikely(is_swap_pmd(pmd
))) {
1007 swp_entry_t entry
= pmd_to_swp_entry(pmd
);
1009 VM_BUG_ON(!is_pmd_migration_entry(pmd
));
1010 if (is_write_migration_entry(entry
)) {
1011 make_migration_entry_read(&entry
);
1012 pmd
= swp_entry_to_pmd(entry
);
1013 if (pmd_swp_soft_dirty(*src_pmd
))
1014 pmd
= pmd_swp_mksoft_dirty(pmd
);
1015 set_pmd_at(src_mm
, addr
, src_pmd
, pmd
);
1017 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1018 mm_inc_nr_ptes(dst_mm
);
1019 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
1020 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
1026 if (unlikely(!pmd_trans_huge(pmd
))) {
1027 pte_free(dst_mm
, pgtable
);
1031 * When page table lock is held, the huge zero pmd should not be
1032 * under splitting since we don't split the page itself, only pmd to
1035 if (is_huge_zero_pmd(pmd
)) {
1036 struct page
*zero_page
;
1038 * get_huge_zero_page() will never allocate a new page here,
1039 * since we already have a zero page to copy. It just takes a
1042 zero_page
= mm_get_huge_zero_page(dst_mm
);
1043 set_huge_zero_page(pgtable
, dst_mm
, vma
, addr
, dst_pmd
,
1049 src_page
= pmd_page(pmd
);
1050 VM_BUG_ON_PAGE(!PageHead(src_page
), src_page
);
1052 page_dup_rmap(src_page
, true);
1053 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1054 mm_inc_nr_ptes(dst_mm
);
1055 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
1057 pmdp_set_wrprotect(src_mm
, addr
, src_pmd
);
1058 pmd
= pmd_mkold(pmd_wrprotect(pmd
));
1059 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
1063 spin_unlock(src_ptl
);
1064 spin_unlock(dst_ptl
);
1069 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1070 static void touch_pud(struct vm_area_struct
*vma
, unsigned long addr
,
1071 pud_t
*pud
, int flags
)
1075 _pud
= pud_mkyoung(*pud
);
1076 if (flags
& FOLL_WRITE
)
1077 _pud
= pud_mkdirty(_pud
);
1078 if (pudp_set_access_flags(vma
, addr
& HPAGE_PUD_MASK
,
1079 pud
, _pud
, flags
& FOLL_WRITE
))
1080 update_mmu_cache_pud(vma
, addr
, pud
);
1083 struct page
*follow_devmap_pud(struct vm_area_struct
*vma
, unsigned long addr
,
1084 pud_t
*pud
, int flags
, struct dev_pagemap
**pgmap
)
1086 unsigned long pfn
= pud_pfn(*pud
);
1087 struct mm_struct
*mm
= vma
->vm_mm
;
1090 assert_spin_locked(pud_lockptr(mm
, pud
));
1092 if (flags
& FOLL_WRITE
&& !pud_write(*pud
))
1095 if (pud_present(*pud
) && pud_devmap(*pud
))
1100 if (flags
& FOLL_TOUCH
)
1101 touch_pud(vma
, addr
, pud
, flags
);
1104 * device mapped pages can only be returned if the
1105 * caller will manage the page reference count.
1107 if (!(flags
& FOLL_GET
))
1108 return ERR_PTR(-EEXIST
);
1110 pfn
+= (addr
& ~PUD_MASK
) >> PAGE_SHIFT
;
1111 *pgmap
= get_dev_pagemap(pfn
, *pgmap
);
1113 return ERR_PTR(-EFAULT
);
1114 page
= pfn_to_page(pfn
);
1120 int copy_huge_pud(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1121 pud_t
*dst_pud
, pud_t
*src_pud
, unsigned long addr
,
1122 struct vm_area_struct
*vma
)
1124 spinlock_t
*dst_ptl
, *src_ptl
;
1128 dst_ptl
= pud_lock(dst_mm
, dst_pud
);
1129 src_ptl
= pud_lockptr(src_mm
, src_pud
);
1130 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1134 if (unlikely(!pud_trans_huge(pud
) && !pud_devmap(pud
)))
1138 * When page table lock is held, the huge zero pud should not be
1139 * under splitting since we don't split the page itself, only pud to
1142 if (is_huge_zero_pud(pud
)) {
1143 /* No huge zero pud yet */
1146 pudp_set_wrprotect(src_mm
, addr
, src_pud
);
1147 pud
= pud_mkold(pud_wrprotect(pud
));
1148 set_pud_at(dst_mm
, addr
, dst_pud
, pud
);
1152 spin_unlock(src_ptl
);
1153 spin_unlock(dst_ptl
);
1157 void huge_pud_set_accessed(struct vm_fault
*vmf
, pud_t orig_pud
)
1160 unsigned long haddr
;
1161 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1163 vmf
->ptl
= pud_lock(vmf
->vma
->vm_mm
, vmf
->pud
);
1164 if (unlikely(!pud_same(*vmf
->pud
, orig_pud
)))
1167 entry
= pud_mkyoung(orig_pud
);
1169 entry
= pud_mkdirty(entry
);
1170 haddr
= vmf
->address
& HPAGE_PUD_MASK
;
1171 if (pudp_set_access_flags(vmf
->vma
, haddr
, vmf
->pud
, entry
, write
))
1172 update_mmu_cache_pud(vmf
->vma
, vmf
->address
, vmf
->pud
);
1175 spin_unlock(vmf
->ptl
);
1177 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1179 void huge_pmd_set_accessed(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1182 unsigned long haddr
;
1183 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1185 vmf
->ptl
= pmd_lock(vmf
->vma
->vm_mm
, vmf
->pmd
);
1186 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1189 entry
= pmd_mkyoung(orig_pmd
);
1191 entry
= pmd_mkdirty(entry
);
1192 haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1193 if (pmdp_set_access_flags(vmf
->vma
, haddr
, vmf
->pmd
, entry
, write
))
1194 update_mmu_cache_pmd(vmf
->vma
, vmf
->address
, vmf
->pmd
);
1197 spin_unlock(vmf
->ptl
);
1200 static vm_fault_t
do_huge_pmd_wp_page_fallback(struct vm_fault
*vmf
,
1201 pmd_t orig_pmd
, struct page
*page
)
1203 struct vm_area_struct
*vma
= vmf
->vma
;
1204 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1205 struct mem_cgroup
*memcg
;
1210 struct page
**pages
;
1211 struct mmu_notifier_range range
;
1213 pages
= kmalloc_array(HPAGE_PMD_NR
, sizeof(struct page
*),
1215 if (unlikely(!pages
)) {
1216 ret
|= VM_FAULT_OOM
;
1220 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1221 pages
[i
] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE
, vma
,
1222 vmf
->address
, page_to_nid(page
));
1223 if (unlikely(!pages
[i
] ||
1224 mem_cgroup_try_charge_delay(pages
[i
], vma
->vm_mm
,
1225 GFP_KERNEL
, &memcg
, false))) {
1229 memcg
= (void *)page_private(pages
[i
]);
1230 set_page_private(pages
[i
], 0);
1231 mem_cgroup_cancel_charge(pages
[i
], memcg
,
1236 ret
|= VM_FAULT_OOM
;
1239 set_page_private(pages
[i
], (unsigned long)memcg
);
1242 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1243 copy_user_highpage(pages
[i
], page
+ i
,
1244 haddr
+ PAGE_SIZE
* i
, vma
);
1245 __SetPageUptodate(pages
[i
]);
1249 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
, vma
->vm_mm
,
1250 haddr
, haddr
+ HPAGE_PMD_SIZE
);
1251 mmu_notifier_invalidate_range_start(&range
);
1253 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1254 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1255 goto out_free_pages
;
1256 VM_BUG_ON_PAGE(!PageHead(page
), page
);
	 * Leave pmd empty until pte is filled. Note that we must notify here, as
	 * a concurrent CPU thread might write to the new page before the call to
	 * mmu_notifier_invalidate_range_end() happens, which can lead to a
	 * device seeing memory writes in a different order than the CPU.
	 *
	 * See Documentation/vm/mmu_notifier.rst
1266 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1268 pgtable
= pgtable_trans_huge_withdraw(vma
->vm_mm
, vmf
->pmd
);
1269 pmd_populate(vma
->vm_mm
, &_pmd
, pgtable
);
1271 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
1273 entry
= mk_pte(pages
[i
], vma
->vm_page_prot
);
1274 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
1275 memcg
= (void *)page_private(pages
[i
]);
1276 set_page_private(pages
[i
], 0);
1277 page_add_new_anon_rmap(pages
[i
], vmf
->vma
, haddr
, false);
1278 mem_cgroup_commit_charge(pages
[i
], memcg
, false, false);
1279 lru_cache_add_active_or_unevictable(pages
[i
], vma
);
1280 vmf
->pte
= pte_offset_map(&_pmd
, haddr
);
1281 VM_BUG_ON(!pte_none(*vmf
->pte
));
1282 set_pte_at(vma
->vm_mm
, haddr
, vmf
->pte
, entry
);
1283 pte_unmap(vmf
->pte
);
1287 smp_wmb(); /* make pte visible before pmd */
1288 pmd_populate(vma
->vm_mm
, vmf
->pmd
, pgtable
);
1289 page_remove_rmap(page
, true);
1290 spin_unlock(vmf
->ptl
);
1293 * No need to double call mmu_notifier->invalidate_range() callback as
1294 * the above pmdp_huge_clear_flush_notify() did already call it.
1296 mmu_notifier_invalidate_range_only_end(&range
);
1298 ret
|= VM_FAULT_WRITE
;
1305 spin_unlock(vmf
->ptl
);
1306 mmu_notifier_invalidate_range_end(&range
);
1307 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1308 memcg
= (void *)page_private(pages
[i
]);
1309 set_page_private(pages
[i
], 0);
1310 mem_cgroup_cancel_charge(pages
[i
], memcg
, false);
1317 vm_fault_t
do_huge_pmd_wp_page(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1319 struct vm_area_struct
*vma
= vmf
->vma
;
1320 struct page
*page
= NULL
, *new_page
;
1321 struct mem_cgroup
*memcg
;
1322 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1323 struct mmu_notifier_range range
;
1324 gfp_t huge_gfp
; /* for allocation and charge */
1327 vmf
->ptl
= pmd_lockptr(vma
->vm_mm
, vmf
->pmd
);
1328 VM_BUG_ON_VMA(!vma
->anon_vma
, vma
);
1329 if (is_huge_zero_pmd(orig_pmd
))
1331 spin_lock(vmf
->ptl
);
1332 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1335 page
= pmd_page(orig_pmd
);
1336 VM_BUG_ON_PAGE(!PageCompound(page
) || !PageHead(page
), page
);
1338 * We can only reuse the page if nobody else maps the huge page or it's
1341 if (!trylock_page(page
)) {
1343 spin_unlock(vmf
->ptl
);
1345 spin_lock(vmf
->ptl
);
1346 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1353 if (reuse_swap_page(page
, NULL
)) {
1355 entry
= pmd_mkyoung(orig_pmd
);
1356 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1357 if (pmdp_set_access_flags(vma
, haddr
, vmf
->pmd
, entry
, 1))
1358 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1359 ret
|= VM_FAULT_WRITE
;
1365 spin_unlock(vmf
->ptl
);
1367 if (__transparent_hugepage_enabled(vma
) &&
1368 !transparent_hugepage_debug_cow()) {
1369 huge_gfp
= alloc_hugepage_direct_gfpmask(vma
);
1370 new_page
= alloc_hugepage_vma(huge_gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
1374 if (likely(new_page
)) {
1375 prep_transhuge_page(new_page
);
1378 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1379 ret
|= VM_FAULT_FALLBACK
;
1381 ret
= do_huge_pmd_wp_page_fallback(vmf
, orig_pmd
, page
);
1382 if (ret
& VM_FAULT_OOM
) {
1383 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1384 ret
|= VM_FAULT_FALLBACK
;
1388 count_vm_event(THP_FAULT_FALLBACK
);
1392 if (unlikely(mem_cgroup_try_charge_delay(new_page
, vma
->vm_mm
,
1393 huge_gfp
, &memcg
, true))) {
1395 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1398 ret
|= VM_FAULT_FALLBACK
;
1399 count_vm_event(THP_FAULT_FALLBACK
);
1403 count_vm_event(THP_FAULT_ALLOC
);
1404 count_memcg_events(memcg
, THP_FAULT_ALLOC
, 1);
1407 clear_huge_page(new_page
, vmf
->address
, HPAGE_PMD_NR
);
1409 copy_user_huge_page(new_page
, page
, vmf
->address
,
1411 __SetPageUptodate(new_page
);
1413 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
, vma
->vm_mm
,
1414 haddr
, haddr
+ HPAGE_PMD_SIZE
);
1415 mmu_notifier_invalidate_range_start(&range
);
1417 spin_lock(vmf
->ptl
);
1420 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1421 spin_unlock(vmf
->ptl
);
1422 mem_cgroup_cancel_charge(new_page
, memcg
, true);
1427 entry
= mk_huge_pmd(new_page
, vma
->vm_page_prot
);
1428 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1429 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1430 page_add_new_anon_rmap(new_page
, vma
, haddr
, true);
1431 mem_cgroup_commit_charge(new_page
, memcg
, false, true);
1432 lru_cache_add_active_or_unevictable(new_page
, vma
);
1433 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
1434 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1436 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1438 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1439 page_remove_rmap(page
, true);
1442 ret
|= VM_FAULT_WRITE
;
1444 spin_unlock(vmf
->ptl
);
1447 * No need to double call mmu_notifier->invalidate_range() callback as
1448 * the above pmdp_huge_clear_flush_notify() did already call it.
1450 mmu_notifier_invalidate_range_only_end(&range
);
1454 spin_unlock(vmf
->ptl
);
/*
 * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
 * but only after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
	return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
}
1467 struct page
*follow_trans_huge_pmd(struct vm_area_struct
*vma
,
1472 struct mm_struct
*mm
= vma
->vm_mm
;
1473 struct page
*page
= NULL
;
1475 assert_spin_locked(pmd_lockptr(mm
, pmd
));
1477 if (flags
& FOLL_WRITE
&& !can_follow_write_pmd(*pmd
, flags
))
1480 /* Avoid dumping huge zero page */
1481 if ((flags
& FOLL_DUMP
) && is_huge_zero_pmd(*pmd
))
1482 return ERR_PTR(-EFAULT
);
1484 /* Full NUMA hinting faults to serialise migration in fault paths */
1485 if ((flags
& FOLL_NUMA
) && pmd_protnone(*pmd
))
1488 page
= pmd_page(*pmd
);
1489 VM_BUG_ON_PAGE(!PageHead(page
) && !is_zone_device_page(page
), page
);
1490 if (flags
& FOLL_TOUCH
)
1491 touch_pmd(vma
, addr
, pmd
, flags
);
1492 if ((flags
& FOLL_MLOCK
) && (vma
->vm_flags
& VM_LOCKED
)) {
1494 * We don't mlock() pte-mapped THPs. This way we can avoid
1495 * leaking mlocked pages into non-VM_LOCKED VMAs.
1499 * In most cases the pmd is the only mapping of the page as we
1500 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1501 * writable private mappings in populate_vma_page_range().
		 * The only scenario when we have the page shared here is if we
		 * are mlocking a read-only mapping shared over fork(). We skip
		 * mlocking such pages.
1509 * We can expect PageDoubleMap() to be stable under page lock:
1510 * for file pages we set it in page_add_file_rmap(), which
1511 * requires page to be locked.
1514 if (PageAnon(page
) && compound_mapcount(page
) != 1)
1516 if (PageDoubleMap(page
) || !page
->mapping
)
1518 if (!trylock_page(page
))
1521 if (page
->mapping
&& !PageDoubleMap(page
))
1522 mlock_vma_page(page
);
1526 page
+= (addr
& ~HPAGE_PMD_MASK
) >> PAGE_SHIFT
;
1527 VM_BUG_ON_PAGE(!PageCompound(page
) && !is_zone_device_page(page
), page
);
1528 if (flags
& FOLL_GET
)
1535 /* NUMA hinting page fault entry point for trans huge pmds */
1536 vm_fault_t
do_huge_pmd_numa_page(struct vm_fault
*vmf
, pmd_t pmd
)
1538 struct vm_area_struct
*vma
= vmf
->vma
;
1539 struct anon_vma
*anon_vma
= NULL
;
1541 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1542 int page_nid
= NUMA_NO_NODE
, this_nid
= numa_node_id();
1543 int target_nid
, last_cpupid
= -1;
1545 bool migrated
= false;
1549 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1550 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
)))
1554 * If there are potential migrations, wait for completion and retry
1555 * without disrupting NUMA hinting information. Do not relock and
1556 * check_same as the page may no longer be mapped.
1558 if (unlikely(pmd_trans_migrating(*vmf
->pmd
))) {
1559 page
= pmd_page(*vmf
->pmd
);
1560 if (!get_page_unless_zero(page
))
1562 spin_unlock(vmf
->ptl
);
1563 put_and_wait_on_page_locked(page
);
1567 page
= pmd_page(pmd
);
1568 BUG_ON(is_huge_zero_page(page
));
1569 page_nid
= page_to_nid(page
);
1570 last_cpupid
= page_cpupid_last(page
);
1571 count_vm_numa_event(NUMA_HINT_FAULTS
);
1572 if (page_nid
== this_nid
) {
1573 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL
);
1574 flags
|= TNF_FAULT_LOCAL
;
1577 /* See similar comment in do_numa_page for explanation */
1578 if (!pmd_savedwrite(pmd
))
1579 flags
|= TNF_NO_GROUP
;
1582 * Acquire the page lock to serialise THP migrations but avoid dropping
1583 * page_table_lock if at all possible
1585 page_locked
= trylock_page(page
);
1586 target_nid
= mpol_misplaced(page
, vma
, haddr
);
1587 if (target_nid
== NUMA_NO_NODE
) {
1588 /* If the page was locked, there are no parallel migrations */
1593 /* Migration could have started since the pmd_trans_migrating check */
1595 page_nid
= NUMA_NO_NODE
;
1596 if (!get_page_unless_zero(page
))
1598 spin_unlock(vmf
->ptl
);
1599 put_and_wait_on_page_locked(page
);
	 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
	 * to serialise splits.
1608 spin_unlock(vmf
->ptl
);
1609 anon_vma
= page_lock_anon_vma_read(page
);
1611 /* Confirm the PMD did not change while page_table_lock was released */
1612 spin_lock(vmf
->ptl
);
1613 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
))) {
1616 page_nid
= NUMA_NO_NODE
;
1620 /* Bail if we fail to protect against THP splits for any reason */
1621 if (unlikely(!anon_vma
)) {
1623 page_nid
= NUMA_NO_NODE
;
1628 * Since we took the NUMA fault, we must have observed the !accessible
1629 * bit. Make sure all other CPUs agree with that, to avoid them
1630 * modifying the page we're about to migrate.
1632 * Must be done under PTL such that we'll observe the relevant
1633 * inc_tlb_flush_pending().
1635 * We are not sure a pending tlb flush here is for a huge page
1636 * mapping or not. Hence use the tlb range variant
1638 if (mm_tlb_flush_pending(vma
->vm_mm
)) {
1639 flush_tlb_range(vma
, haddr
, haddr
+ HPAGE_PMD_SIZE
);
1641 * change_huge_pmd() released the pmd lock before
1642 * invalidating the secondary MMUs sharing the primary
1643 * MMU pagetables (with ->invalidate_range()). The
1644 * mmu_notifier_invalidate_range_end() (which
1645 * internally calls ->invalidate_range()) in
1646 * change_pmd_range() will run after us, so we can't
1647 * rely on it here and we need an explicit invalidate.
1649 mmu_notifier_invalidate_range(vma
->vm_mm
, haddr
,
1650 haddr
+ HPAGE_PMD_SIZE
);
1654 * Migrate the THP to the requested node, returns with page unlocked
1655 * and access rights restored.
1657 spin_unlock(vmf
->ptl
);
1659 migrated
= migrate_misplaced_transhuge_page(vma
->vm_mm
, vma
,
1660 vmf
->pmd
, pmd
, vmf
->address
, page
, target_nid
);
1662 flags
|= TNF_MIGRATED
;
1663 page_nid
= target_nid
;
1665 flags
|= TNF_MIGRATE_FAIL
;
1669 BUG_ON(!PageLocked(page
));
1670 was_writable
= pmd_savedwrite(pmd
);
1671 pmd
= pmd_modify(pmd
, vma
->vm_page_prot
);
1672 pmd
= pmd_mkyoung(pmd
);
1674 pmd
= pmd_mkwrite(pmd
);
1675 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, pmd
);
1676 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1679 spin_unlock(vmf
->ptl
);
1683 page_unlock_anon_vma_read(anon_vma
);
1685 if (page_nid
!= NUMA_NO_NODE
)
1686 task_numa_fault(last_cpupid
, page_nid
, HPAGE_PMD_NR
,
1693 * Return true if we do MADV_FREE successfully on entire pmd page.
1694 * Otherwise, return false.
1696 bool madvise_free_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1697 pmd_t
*pmd
, unsigned long addr
, unsigned long next
)
1702 struct mm_struct
*mm
= tlb
->mm
;
1705 tlb_change_page_size(tlb
, HPAGE_PMD_SIZE
);
1707 ptl
= pmd_trans_huge_lock(pmd
, vma
);
1712 if (is_huge_zero_pmd(orig_pmd
))
1715 if (unlikely(!pmd_present(orig_pmd
))) {
1716 VM_BUG_ON(thp_migration_supported() &&
1717 !is_pmd_migration_entry(orig_pmd
));
1721 page
= pmd_page(orig_pmd
);
1723 * If other processes are mapping this page, we couldn't discard
1724 * the page unless they all do MADV_FREE so let's skip the page.
1726 if (total_mapcount(page
) != 1)
1729 if (!trylock_page(page
))
	 * If the user wants to discard part of the pages of a THP, split it so
	 * MADV_FREE will deactivate only them.
1736 if (next
- addr
!= HPAGE_PMD_SIZE
) {
1739 split_huge_page(page
);
1745 if (PageDirty(page
))
1746 ClearPageDirty(page
);
1749 if (pmd_young(orig_pmd
) || pmd_dirty(orig_pmd
)) {
1750 pmdp_invalidate(vma
, addr
, pmd
);
1751 orig_pmd
= pmd_mkold(orig_pmd
);
1752 orig_pmd
= pmd_mkclean(orig_pmd
);
1754 set_pmd_at(mm
, addr
, pmd
, orig_pmd
);
1755 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1758 mark_page_lazyfree(page
);
1766 static inline void zap_deposited_table(struct mm_struct
*mm
, pmd_t
*pmd
)
1770 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1771 pte_free(mm
, pgtable
);
1775 int zap_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1776 pmd_t
*pmd
, unsigned long addr
)
1781 tlb_change_page_size(tlb
, HPAGE_PMD_SIZE
);
1783 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1787 * For architectures like ppc64 we look at deposited pgtable
1788 * when calling pmdp_huge_get_and_clear. So do the
1789 * pgtable_trans_huge_withdraw after finishing pmdp related
1792 orig_pmd
= pmdp_huge_get_and_clear_full(tlb
->mm
, addr
, pmd
,
1794 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1795 if (vma_is_dax(vma
)) {
1796 if (arch_needs_pgtable_deposit())
1797 zap_deposited_table(tlb
->mm
, pmd
);
1799 if (is_huge_zero_pmd(orig_pmd
))
1800 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1801 } else if (is_huge_zero_pmd(orig_pmd
)) {
1802 zap_deposited_table(tlb
->mm
, pmd
);
1804 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1806 struct page
*page
= NULL
;
1807 int flush_needed
= 1;
1809 if (pmd_present(orig_pmd
)) {
1810 page
= pmd_page(orig_pmd
);
1811 page_remove_rmap(page
, true);
1812 VM_BUG_ON_PAGE(page_mapcount(page
) < 0, page
);
1813 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1814 } else if (thp_migration_supported()) {
1817 VM_BUG_ON(!is_pmd_migration_entry(orig_pmd
));
1818 entry
= pmd_to_swp_entry(orig_pmd
);
1819 page
= pfn_to_page(swp_offset(entry
));
1822 WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!");
1824 if (PageAnon(page
)) {
1825 zap_deposited_table(tlb
->mm
, pmd
);
1826 add_mm_counter(tlb
->mm
, MM_ANONPAGES
, -HPAGE_PMD_NR
);
1828 if (arch_needs_pgtable_deposit())
1829 zap_deposited_table(tlb
->mm
, pmd
);
1830 add_mm_counter(tlb
->mm
, mm_counter_file(page
), -HPAGE_PMD_NR
);
1835 tlb_remove_page_size(tlb
, page
, HPAGE_PMD_SIZE
);
1840 #ifndef pmd_move_must_withdraw
1841 static inline int pmd_move_must_withdraw(spinlock_t
*new_pmd_ptl
,
1842 spinlock_t
*old_pmd_ptl
,
1843 struct vm_area_struct
*vma
)
1846 * With split pmd lock we also need to move preallocated
1847 * PTE page table if new_pmd is on different PMD page table.
1849 * We also don't deposit and withdraw tables for file pages.
1851 return (new_pmd_ptl
!= old_pmd_ptl
) && vma_is_anonymous(vma
);
1855 static pmd_t
move_soft_dirty_pmd(pmd_t pmd
)
1857 #ifdef CONFIG_MEM_SOFT_DIRTY
1858 if (unlikely(is_pmd_migration_entry(pmd
)))
1859 pmd
= pmd_swp_mksoft_dirty(pmd
);
1860 else if (pmd_present(pmd
))
1861 pmd
= pmd_mksoft_dirty(pmd
);
1866 bool move_huge_pmd(struct vm_area_struct
*vma
, unsigned long old_addr
,
1867 unsigned long new_addr
, unsigned long old_end
,
1868 pmd_t
*old_pmd
, pmd_t
*new_pmd
)
1870 spinlock_t
*old_ptl
, *new_ptl
;
1872 struct mm_struct
*mm
= vma
->vm_mm
;
1873 bool force_flush
= false;
1875 if ((old_addr
& ~HPAGE_PMD_MASK
) ||
1876 (new_addr
& ~HPAGE_PMD_MASK
) ||
1877 old_end
- old_addr
< HPAGE_PMD_SIZE
)
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
1884 if (WARN_ON(!pmd_none(*new_pmd
))) {
1885 VM_BUG_ON(pmd_trans_huge(*new_pmd
));
1890 * We don't have to worry about the ordering of src and dst
1891 * ptlocks because exclusive mmap_sem prevents deadlock.
1893 old_ptl
= __pmd_trans_huge_lock(old_pmd
, vma
);
1895 new_ptl
= pmd_lockptr(mm
, new_pmd
);
1896 if (new_ptl
!= old_ptl
)
1897 spin_lock_nested(new_ptl
, SINGLE_DEPTH_NESTING
);
1898 pmd
= pmdp_huge_get_and_clear(mm
, old_addr
, old_pmd
);
1899 if (pmd_present(pmd
))
1901 VM_BUG_ON(!pmd_none(*new_pmd
));
1903 if (pmd_move_must_withdraw(new_ptl
, old_ptl
, vma
)) {
1905 pgtable
= pgtable_trans_huge_withdraw(mm
, old_pmd
);
1906 pgtable_trans_huge_deposit(mm
, new_pmd
, pgtable
);
1908 pmd
= move_soft_dirty_pmd(pmd
);
1909 set_pmd_at(mm
, new_addr
, new_pmd
, pmd
);
1911 flush_tlb_range(vma
, old_addr
, old_addr
+ PMD_SIZE
);
1912 if (new_ptl
!= old_ptl
)
1913 spin_unlock(new_ptl
);
1914 spin_unlock(old_ptl
);
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
 *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1926 int change_huge_pmd(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1927 unsigned long addr
, pgprot_t newprot
, int prot_numa
)
1929 struct mm_struct
*mm
= vma
->vm_mm
;
1932 bool preserve_write
;
1935 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1939 preserve_write
= prot_numa
&& pmd_write(*pmd
);
1942 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1943 if (is_swap_pmd(*pmd
)) {
1944 swp_entry_t entry
= pmd_to_swp_entry(*pmd
);
1946 VM_BUG_ON(!is_pmd_migration_entry(*pmd
));
1947 if (is_write_migration_entry(entry
)) {
1950 * A protection check is difficult so
1951 * just be safe and disable write
1953 make_migration_entry_read(&entry
);
1954 newpmd
= swp_entry_to_pmd(entry
);
1955 if (pmd_swp_soft_dirty(*pmd
))
1956 newpmd
= pmd_swp_mksoft_dirty(newpmd
);
1957 set_pmd_at(mm
, addr
, pmd
, newpmd
);
1964 * Avoid trapping faults against the zero page. The read-only
1965 * data is likely to be read-cached on the local CPU and
1966 * local/remote hits to the zero page are not interesting.
1968 if (prot_numa
&& is_huge_zero_pmd(*pmd
))
1971 if (prot_numa
&& pmd_protnone(*pmd
))
1975 * In case prot_numa, we are under down_read(mmap_sem). It's critical
1976 * to not clear pmd intermittently to avoid race with MADV_DONTNEED
1977 * which is also under down_read(mmap_sem):
1980 * change_huge_pmd(prot_numa=1)
1981 * pmdp_huge_get_and_clear_notify()
1982 * madvise_dontneed()
1984 * pmd_trans_huge(*pmd) == 0 (without ptl)
1987 * // pmd is re-established
1989 * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
1990 * which may break userspace.
1992 * pmdp_invalidate() is required to make sure we don't miss
1993 * dirty/young flags set by hardware.
1995 entry
= pmdp_invalidate(vma
, addr
, pmd
);
1997 entry
= pmd_modify(entry
, newprot
);
1999 entry
= pmd_mk_savedwrite(entry
);
2001 set_pmd_at(mm
, addr
, pmd
, entry
);
2002 BUG_ON(vma_is_anonymous(vma
) && !preserve_write
&& pmd_write(entry
));
2009 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
 * Note that if it returns a page table lock pointer, this routine returns
 * without unlocking the page table lock, so callers must unlock it.
2014 spinlock_t
*__pmd_trans_huge_lock(pmd_t
*pmd
, struct vm_area_struct
*vma
)
2017 ptl
= pmd_lock(vma
->vm_mm
, pmd
);
2018 if (likely(is_swap_pmd(*pmd
) || pmd_trans_huge(*pmd
) ||
2026 * Returns true if a given pud maps a thp, false otherwise.
2028 * Note that if it returns true, this routine returns without unlocking page
2029 * table lock. So callers must unlock it.
2031 spinlock_t
*__pud_trans_huge_lock(pud_t
*pud
, struct vm_area_struct
*vma
)
2035 ptl
= pud_lock(vma
->vm_mm
, pud
);
2036 if (likely(pud_trans_huge(*pud
) || pud_devmap(*pud
)))
2042 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
2043 int zap_huge_pud(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
2044 pud_t
*pud
, unsigned long addr
)
2048 ptl
= __pud_trans_huge_lock(pud
, vma
);
2052 * For architectures like ppc64 we look at deposited pgtable
2053 * when calling pudp_huge_get_and_clear. So do the
2054 * pgtable_trans_huge_withdraw after finishing pudp related
2057 pudp_huge_get_and_clear_full(tlb
->mm
, addr
, pud
, tlb
->fullmm
);
2058 tlb_remove_pud_tlb_entry(tlb
, pud
, addr
);
2059 if (vma_is_dax(vma
)) {
2061 /* No zero page support yet */
2063 /* No support for anonymous PUD pages yet */
2069 static void __split_huge_pud_locked(struct vm_area_struct
*vma
, pud_t
*pud
,
2070 unsigned long haddr
)
2072 VM_BUG_ON(haddr
& ~HPAGE_PUD_MASK
);
2073 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
2074 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PUD_SIZE
, vma
);
2075 VM_BUG_ON(!pud_trans_huge(*pud
) && !pud_devmap(*pud
));
2077 count_vm_event(THP_SPLIT_PUD
);
2079 pudp_huge_clear_flush_notify(vma
, haddr
, pud
);
2082 void __split_huge_pud(struct vm_area_struct
*vma
, pud_t
*pud
,
2083 unsigned long address
)
2086 struct mmu_notifier_range range
;
2088 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
, vma
->vm_mm
,
2089 address
& HPAGE_PUD_MASK
,
2090 (address
& HPAGE_PUD_MASK
) + HPAGE_PUD_SIZE
);
2091 mmu_notifier_invalidate_range_start(&range
);
2092 ptl
= pud_lock(vma
->vm_mm
, pud
);
2093 if (unlikely(!pud_trans_huge(*pud
) && !pud_devmap(*pud
)))
2095 __split_huge_pud_locked(vma
, pud
, range
.start
);
2100 * No need to double call mmu_notifier->invalidate_range() callback as
2101 * the above pudp_huge_clear_flush_notify() did already call it.
2103 mmu_notifier_invalidate_range_only_end(&range
);
2105 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
2107 static void __split_huge_zero_page_pmd(struct vm_area_struct
*vma
,
2108 unsigned long haddr
, pmd_t
*pmd
)
2110 struct mm_struct
*mm
= vma
->vm_mm
;
2116 * Leave pmd empty until pte is filled note that it is fine to delay
2117 * notification until mmu_notifier_invalidate_range_end() as we are
2118 * replacing a zero pmd write protected page with a zero pte write
2121 * See Documentation/vm/mmu_notifier.rst
2123 pmdp_huge_clear_flush(vma
, haddr
, pmd
);
2125 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
2126 pmd_populate(mm
, &_pmd
, pgtable
);
2128 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
2130 entry
= pfn_pte(my_zero_pfn(haddr
), vma
->vm_page_prot
);
2131 entry
= pte_mkspecial(entry
);
2132 pte
= pte_offset_map(&_pmd
, haddr
);
2133 VM_BUG_ON(!pte_none(*pte
));
2134 set_pte_at(mm
, haddr
, pte
, entry
);
2137 smp_wmb(); /* make pte visible before pmd */
2138 pmd_populate(mm
, pmd
, pgtable
);

static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long haddr, bool freeze)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	pgtable_t pgtable;
	pmd_t old_pmd, _pmd;
	bool young, write, soft_dirty, pmd_migration = false;
	unsigned long addr;
	int i;

	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
	VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
	VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
				&& !pmd_devmap(*pmd));

	count_vm_event(THP_SPLIT_PMD);

	if (!vma_is_anonymous(vma)) {
		old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
		/*
		 * We are going to unmap this huge page. So
		 * just go ahead and zap it.
		 */
		if (arch_needs_pgtable_deposit())
			zap_deposited_table(mm, pmd);
		if (vma_is_dax(vma))
			return;
		if (unlikely(is_pmd_migration_entry(old_pmd))) {
			swp_entry_t entry;

			entry = pmd_to_swp_entry(old_pmd);
			page = migration_entry_to_page(entry);
		} else {
			page = pmd_page(old_pmd);
			if (!PageDirty(page) && pmd_dirty(old_pmd))
				set_page_dirty(page);
			if (!PageReferenced(page) && pmd_young(old_pmd))
				SetPageReferenced(page);
			page_remove_rmap(page, true);
			put_page(page);
		}
		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
		return;
	}

	if (is_huge_zero_pmd(*pmd)) {
		/*
		 * FIXME: Do we want to invalidate secondary mmu by calling
		 * mmu_notifier_invalidate_range() see comments below inside
		 * __split_huge_pmd() ?
		 *
		 * We are going from a zero huge page write protected to zero
		 * small pages also write protected, so it does not seem useful
		 * to invalidate the secondary mmu at this time.
		 */
		return __split_huge_zero_page_pmd(vma, haddr, pmd);
	}

	/*
	 * Up to this point the pmd is present and huge and userland has the
	 * whole access to the hugepage during the split (which happens in
	 * place). If we overwrite the pmd with the not-huge version pointing
	 * to the pte here (which of course we could if all CPUs were bug
	 * free), userland could trigger a small page size TLB miss on the
	 * small sized TLB while the hugepage TLB entry is still established in
	 * the huge TLB. Some CPUs don't like that.
	 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
	 * 383 on page 93. Intel should be safe but also warns that it's
	 * only safe if the permission and cache attributes of the two entries
	 * loaded in the two TLBs are identical (which should be the case here).
	 * But it is generally safer to never allow small and huge TLB entries
	 * for the same virtual address to be loaded simultaneously. So instead
	 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
	 * current pmd notpresent (atomically because here the pmd_trans_huge
	 * must remain set at all times on the pmd until the split is complete
	 * for this pmd), then we flush the SMP TLB and finally we write the
	 * non-huge version of the pmd entry with pmd_populate.
	 */
	old_pmd = pmdp_invalidate(vma, haddr, pmd);

	pmd_migration = is_pmd_migration_entry(old_pmd);
	if (unlikely(pmd_migration)) {
		swp_entry_t entry;

		entry = pmd_to_swp_entry(old_pmd);
		page = pfn_to_page(swp_offset(entry));
		write = is_write_migration_entry(entry);
		young = false;
		soft_dirty = pmd_swp_soft_dirty(old_pmd);
	} else {
		page = pmd_page(old_pmd);
		if (pmd_dirty(old_pmd))
			SetPageDirty(page);
		write = pmd_write(old_pmd);
		young = pmd_young(old_pmd);
		soft_dirty = pmd_soft_dirty(old_pmd);
	}
	VM_BUG_ON_PAGE(!page_count(page), page);
	page_ref_add(page, HPAGE_PMD_NR - 1);

	/*
	 * Withdraw the table only after we mark the pmd entry invalid.
	 * This is critical for some architectures (Power).
	 */
	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		/*
		 * Note that NUMA hinting access restrictions are not
		 * transferred to avoid any possibility of altering
		 * permissions across VMAs.
		 */
		if (freeze || pmd_migration) {
			swp_entry_t swp_entry;
			swp_entry = make_migration_entry(page + i, write);
			entry = swp_entry_to_pte(swp_entry);
			if (soft_dirty)
				entry = pte_swp_mksoft_dirty(entry);
		} else {
			entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
			entry = maybe_mkwrite(entry, vma);
			if (!write)
				entry = pte_wrprotect(entry);
			if (!young)
				entry = pte_mkold(entry);
			if (soft_dirty)
				entry = pte_mksoft_dirty(entry);
		}
		pte = pte_offset_map(&_pmd, addr);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, entry);
		if (!pmd_migration)
			atomic_inc(&page[i]._mapcount);
		pte_unmap(pte);
	}

	if (!pmd_migration) {
		/*
		 * Set PG_double_map before dropping compound_mapcount to avoid
		 * false-negative page_mapped().
		 */
		if (compound_mapcount(page) > 1 &&
		    !TestSetPageDoubleMap(page)) {
			for (i = 0; i < HPAGE_PMD_NR; i++)
				atomic_inc(&page[i]._mapcount);
		}

		lock_page_memcg(page);
		if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
			/* Last compound_mapcount is gone. */
			__dec_lruvec_page_state(page, NR_ANON_THPS);
			if (TestClearPageDoubleMap(page)) {
				/* No need in mapcount reference anymore */
				for (i = 0; i < HPAGE_PMD_NR; i++)
					atomic_dec(&page[i]._mapcount);
			}
		}
		unlock_page_memcg(page);
	}

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
	}
}
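
/*
 * Illustrative summary (not part of the original source) of the TLB-safe
 * split sequence used above, motivated by AMD Erratum 383:
 *
 *	old_pmd = pmdp_invalidate(vma, haddr, pmd);	// mark notpresent + flush
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 *	pmd_populate(mm, &_pmd, pgtable);		// build ptes off to the side
 *	// ... initialise all HPAGE_PMD_NR ptes ...
 *	smp_wmb();
 *	pmd_populate(mm, pmd, pgtable);			// install the non-huge pmd
 *
 * The huge and small translations are never valid at the same time, so no CPU
 * can observe overlapping TLB entries for the same virtual address.
 */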

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page)
{
	spinlock_t *ptl;
	struct mmu_notifier_range range;
	bool do_unlock_page = false;
	pmd_t _pmd;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address & HPAGE_PMD_MASK,
				(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
	mmu_notifier_invalidate_range_start(&range);
	ptl = pmd_lock(vma->vm_mm, pmd);

	/*
	 * If the caller asks to set up migration entries, we need a page to
	 * check the pmd against. Otherwise we can end up replacing the wrong
	 * page.
	 */
	VM_BUG_ON(freeze && !page);
	if (page) {
		VM_WARN_ON_ONCE(!PageLocked(page));
		if (page != pmd_page(*pmd))
			goto out;
	}

repeat:
	if (pmd_trans_huge(*pmd)) {
		if (!page) {
			page = pmd_page(*pmd);
			/*
			 * An anonymous page must be locked, to ensure that a
			 * concurrent reuse_swap_page() sees stable mapcount;
			 * but reuse_swap_page() is not used on shmem or file,
			 * and page lock must not be taken when zap_pmd_range()
			 * calls __split_huge_pmd() while i_mmap_lock is held.
			 */
			if (PageAnon(page)) {
				if (unlikely(!trylock_page(page))) {
					get_page(page);
					_pmd = *pmd;
					spin_unlock(ptl);
					lock_page(page);
					spin_lock(ptl);
					if (unlikely(!pmd_same(*pmd, _pmd))) {
						unlock_page(page);
						put_page(page);
						page = NULL;
						goto repeat;
					}
					put_page(page);
				}
				do_unlock_page = true;
			}
		}
		if (PageMlocked(page))
			clear_page_mlock(page);
	} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
		goto out;
	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
	spin_unlock(ptl);
	if (do_unlock_page)
		unlock_page(page);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback.
	 * There are 3 cases to consider inside __split_huge_pmd_locked():
	 * 1) pmdp_huge_clear_flush_notify() calls invalidate_range(), so this
	 *    case is obviously covered.
	 * 2) __split_huge_zero_page_pmd() maps the read-only zero page, and any
	 *    write fault will trigger a flush_notify before pointing to a new
	 *    page (it is fine if the secondary mmu keeps pointing to the old
	 *    zero page in the meantime).
	 * 3) Splitting a huge pmd into ptes pointing to the same page. No need
	 *    to invalidate the secondary tlb entries, they are all still valid.
	 *    Any further change to an individual pte will notify, so there is
	 *    no need to call mmu_notifier->invalidate_range() here.
	 */
	mmu_notifier_invalidate_range_only_end(&range);
}
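
/*
 * Illustrative note (not part of the original source): most callers do not
 * invoke __split_huge_pmd() directly but go through the split_huge_pmd()
 * wrapper declared in include/linux/huge_mm.h, roughly:
 *
 *	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) || is_swap_pmd(*pmd))
 *		__split_huge_pmd(vma, pmd, address, false, NULL);
 *
 * Passing a non-NULL @page together with freeze == true is reserved for
 * callers that are installing migration entries, as the VM_BUG_ON above
 * enforces.
 */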

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, address);
	if (!pgd_present(*pgd))
		return;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		return;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);

	__split_huge_pmd(vma, pmd, address, freeze, page);
}

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			   unsigned long start,
			   unsigned long end,
			   long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}
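
/*
 * Illustrative example (not part of the original source): if a task unmaps an
 * unaligned range in the middle of a THP-backed mapping, e.g.
 *
 *	munmap(base + 0x1000, 0x2000);	// base is HPAGE_PMD_SIZE aligned
 *
 * the resulting VMA boundaries are not huge-page aligned, so the checks above
 * call split_huge_pmd_address() on the affected pmd before the VMA is
 * adjusted, leaving ordinary ptes behind for the surviving subpages.
 */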

static void unmap_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_SPLIT_FREEZE;

	try_to_unmap(page, ttu_flags);

	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}

static void remap_page(struct page *page)
{
	int i;

	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}

static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);

	/*
	 * Clone page flags before unfreezing refcount.
	 *
	 * After successful get_page_unless_zero() might follow flags change,
	 * for example lock_page() which sets PG_waiters.
	 */
	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_swapcache) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_workingset) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;
	page_tail->index = head->index + tail;

	/* Page flags must be visible before we make the page non-compound. */
	smp_wmb();

	/*
	 * Clear PageTail before unfreezing page refcount.
	 *
	 * After successful get_page_unless_zero() might follow put_page()
	 * which needs correct compound_head().
	 */
	clear_compound_head(page_tail);

	/* Finally unfreeze refcount. Additional reference from page cache. */
	page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) ||
					  PageSwapCache(head)));

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));

	/*
	 * always add to the tail because some iterators expect new
	 * pages to show after the currently processed elements - e.g.
	 * migrate_pages
	 */
	lru_add_page_tail(head, page_tail, lruvec, list);
}

static void __split_huge_page(struct page *page, struct list_head *list,
		pgoff_t end, unsigned long flags)
{
	struct page *head = compound_head(page);
	pg_data_t *pgdat = page_pgdat(head);
	struct lruvec *lruvec;
	struct address_space *swap_cache = NULL;
	unsigned long offset = 0;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, pgdat);

	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(head);

	if (PageAnon(head) && PageSwapCache(head)) {
		swp_entry_t entry = { .val = page_private(head) };

		offset = swp_offset(entry);
		swap_cache = swap_address_space(entry);
		xa_lock(&swap_cache->i_pages);
	}

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		} else if (!PageAnon(page)) {
			__xa_store(&head->mapping->i_pages, head[i].index,
					head + i, 0);
		} else if (swap_cache) {
			__xa_store(&swap_cache->i_pages, offset + i,
					head + i, 0);
		}
	}

	ClearPageCompound(head);

	split_page_owner(head, HPAGE_PMD_NR);

	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		/* Additional pin to swap cache */
		if (PageSwapCache(head)) {
			page_ref_add(head, 2);
			xa_unlock(&swap_cache->i_pages);
		} else {
			page_ref_inc(head);
		}
	} else {
		/* Additional pin to page cache */
		page_ref_add(head, 2);
		xa_unlock(&head->mapping->i_pages);
	}

	spin_unlock_irqrestore(&pgdat->lru_lock, flags);

	remap_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;
		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}

int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying them. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}
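
/*
 * Illustrative sketch (not part of the original source) of how a
 * copy-on-write path can use the two return values described above:
 *
 *	int total;
 *
 *	if (page_trans_huge_mapcount(page, &total) == 1) {
 *		// every subpage is mapped at most once: the faulting
 *		// process may reuse the page instead of copying it
 *	}
 *	if (total == 1) {
 *		// all mappings belong to this mm; the hugepage's anon_vma
 *		// can be made local to this vma
 *	}
 */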

/* Racy check whether the huge page can be split */
bool can_split_huge_page(struct page *page, int *pextra_pins)
{
	int extra_pins;

	/* Additional pins from page cache */
	if (PageAnon(page))
		extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
	else
		extra_pins = HPAGE_PMD_NR;
	if (pextra_pins)
		*pextra_pins = extra_pins;
	return total_mapcount(page) == page_count(page) - extra_pins - 1;
}
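
/*
 * Illustrative accounting (not part of the original source) behind the check
 * above: a splittable huge page is expected to satisfy
 *
 *	page_count(head) == total_mapcount(head)	// one ref per mapping
 *			    + extra_pins		// page cache / swap cache refs
 *			    + 1				// the caller's pin
 *
 * Any additional reference (for example a GUP pin) makes the check fail and
 * the split is refused with -EBUSY.
 */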

/*
 * This function splits a huge page into normal pages. @page can point to any
 * subpage of the huge page to split. Split doesn't change the position of
 * @page.
 *
 * The caller must hold a pin on @page, otherwise the split fails with -EBUSY.
 * The huge page must be locked.
 *
 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
 *
 * Both head page and tail pages will inherit mapping, flags, and so on from
 * the hugepage.
 *
 * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if
 * they are not mapped.
 *
 * Returns 0 if the hugepage is split successfully.
 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
 * us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int extra_pins, ret;
	bool mlocked;
	unsigned long flags;
	pgoff_t end;

	VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageWriteback(page))
		return -EBUSY;

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma disappearing so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		end = -1;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		anon_vma = NULL;
		i_mmap_lock_read(mapping);

		/*
		 * __split_huge_page() may need to trim off pages beyond EOF:
		 * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
		 * which cannot be nested inside the page tree lock. So note
		 * end now: i_size itself may be changed at any moment, but
		 * head page lock is good enough to serialize the trimming.
		 */
		end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
	}

	/*
	 * Racy check if we can split the page, before unmap_page() will
	 * split PMDs
	 */
	if (!can_split_huge_page(head, &extra_pins)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	unmap_page(head);

	/* Make sure the page is not on per-CPU pagevec as it takes pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irqsave(&pgdata->lru_lock, flags);

	if (mapping) {
		XA_STATE(xas, &mapping->i_pages, page_index(head));

		/*
		 * Check if the head page is present in page cache.
		 * We assume all tail are present too, if head is there.
		 */
		xa_lock(&mapping->i_pages);
		if (xas_load(&xas) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&ds_queue->split_queue_lock);
	if (page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			ds_queue->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping) {
			if (PageSwapBacked(page))
				__dec_node_page_state(page, NR_SHMEM_THPS);
			else
				__dec_node_page_state(page, NR_FILE_THPS);
		}

		spin_unlock(&ds_queue->split_queue_lock);
		__split_huge_page(page, list, end, flags);
		if (PageSwapCache(head)) {
			swp_entry_t entry = { .val = page_private(head) };

			ret = split_swap_cluster(entry);
		} else
			ret = 0;
	} else {
		spin_unlock(&ds_queue->split_queue_lock);
fail:
		if (mapping)
			xa_unlock(&mapping->i_pages);
		spin_unlock_irqrestore(&pgdata->lru_lock, flags);
		remap_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}

void free_transhuge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
	unsigned long flags;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		ds_queue->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
	free_compound_page(page);
}

void deferred_split_huge_page(struct page *page)
{
	struct deferred_split *ds_queue = get_deferred_split_queue(page);
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg = compound_head(page)->mem_cgroup;
#endif
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	/*
	 * The try_to_unmap() in page reclaim path might reach here too,
	 * this may cause a race condition to corrupt deferred split queue.
	 * And, if page reclaim is already handling the same page, it is
	 * unnecessary to handle it again in shrinker.
	 *
	 * Check PageSwapCache to determine if the page is being
	 * handled by page reclaim since THP swap would add the page into
	 * swap cache before calling try_to_unmap().
	 */
	if (PageSwapCache(page))
		return;

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &ds_queue->split_queue);
		ds_queue->split_queue_len++;
#ifdef CONFIG_MEMCG
		if (memcg)
			memcg_set_shrinker_bit(memcg, page_to_nid(page),
					       deferred_split_shrinker.id);
#endif
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
}
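
/*
 * Illustrative note (assumption, not stated in this file): a typical producer
 * of this queue is the rmap code noticing that a PMD-mapped anonymous THP has
 * lost its last PMD mapping while it is still PTE-mapped, roughly:
 *
 *	if (... compound page partially unmapped ...)
 *		deferred_split_huge_page(compound_head(page));
 *
 * The actual split is deferred to the shrinker below because the caller may
 * hold locks that forbid splitting immediately.
 */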

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif
	return READ_ONCE(ds_queue->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	struct deferred_split *ds_queue = &pgdata->deferred_split_queue;
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

#ifdef CONFIG_MEMCG
	if (sc->memcg)
		ds_queue = &sc->memcg->deferred_split_queue;
#endif

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &ds_queue->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			ds_queue->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		if (!trylock_page(page))
			goto next;
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
next:
		put_page(page);
	}

	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
	list_splice_tail(&list, &ds_queue->split_queue);
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

	/*
	 * Stop shrinker if we didn't split any page, but the queue is empty.
	 * This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&ds_queue->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
		 SHRINKER_NONSLAB,
};

#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			    &split_huge_pages_fops);
	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif
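
/*
 * Usage note (not part of the original source): with CONFIG_DEBUG_FS enabled
 * and debugfs mounted at /sys/kernel/debug, writing 1 to the file created
 * above attempts to split every splittable THP in the system:
 *
 *	# echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * The result is reported via pr_info() as "<split> of <total> THP split".
 */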

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	pmd_t pmdval;
	swp_entry_t entry;
	pmd_t pmdswp;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
	pmdswp = swp_entry_to_pmd(entry);
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	page_remove_rmap(page, true);
	put_page(page);
}

void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
{
	struct vm_area_struct *vma = pvmw->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address = pvmw->address;
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	pmd_t pmde;
	swp_entry_t entry;

	if (!(pvmw->pmd && !pvmw->pte))
		return;

	entry = pmd_to_swp_entry(*pvmw->pmd);
	get_page(new);
	pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot));
	if (pmd_swp_soft_dirty(*pvmw->pmd))
		pmde = pmd_mksoft_dirty(pmde);
	if (is_write_migration_entry(entry))
		pmde = maybe_pmd_mkwrite(pmde, vma);

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	if (PageAnon(new))
		page_add_anon_rmap(new, vma, mmun_start, true);
	else
		page_add_file_rmap(new, true);
	set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
	if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
		mlock_vma_page(new);
	update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
#endif