2 * Copyright (C) 2009 Red Hat, Inc.
4 * This work is licensed under the terms of the GNU GPL, version 2. See
5 * the COPYING file in the top-level directory.
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/swapops.h>
20 #include <linux/dax.h>
21 #include <linux/khugepaged.h>
22 #include <linux/freezer.h>
23 #include <linux/pfn_t.h>
24 #include <linux/mman.h>
25 #include <linux/memremap.h>
26 #include <linux/pagemap.h>
27 #include <linux/debugfs.h>
28 #include <linux/migrate.h>
29 #include <linux/hashtable.h>
30 #include <linux/userfaultfd_k.h>
31 #include <linux/page_idle.h>
32 #include <linux/shmem_fs.h>
35 #include <asm/pgalloc.h>
39 * By default transparent hugepage support is disabled in order that avoid
40 * to risk increase the memory footprint of applications without a guaranteed
41 * benefit. When transparent hugepage support is enabled, is for all mappings,
42 * and khugepaged scans all mappings.
43 * Defrag is invoked by khugepaged hugepage allocations and by page faults
44 * for all hugepage allocations.
46 unsigned long transparent_hugepage_flags __read_mostly
=
47 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
48 (1<<TRANSPARENT_HUGEPAGE_FLAG
)|
50 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
51 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
)|
53 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
)|
54 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG
)|
55 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG
);
57 static struct shrinker deferred_split_shrinker
;
59 static atomic_t huge_zero_refcount
;
60 struct page
*huge_zero_page __read_mostly
;
62 static struct page
*get_huge_zero_page(void)
64 struct page
*zero_page
;
66 if (likely(atomic_inc_not_zero(&huge_zero_refcount
)))
67 return READ_ONCE(huge_zero_page
);
69 zero_page
= alloc_pages((GFP_TRANSHUGE
| __GFP_ZERO
) & ~__GFP_MOVABLE
,
72 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED
);
75 count_vm_event(THP_ZERO_PAGE_ALLOC
);
77 if (cmpxchg(&huge_zero_page
, NULL
, zero_page
)) {
79 __free_pages(zero_page
, compound_order(zero_page
));
83 /* We take additional reference here. It will be put back by shrinker */
84 atomic_set(&huge_zero_refcount
, 2);
86 return READ_ONCE(huge_zero_page
);
89 static void put_huge_zero_page(void)
92 * Counter should never go to zero here. Only shrinker can put
95 BUG_ON(atomic_dec_and_test(&huge_zero_refcount
));
98 struct page
*mm_get_huge_zero_page(struct mm_struct
*mm
)
100 if (test_bit(MMF_HUGE_ZERO_PAGE
, &mm
->flags
))
101 return READ_ONCE(huge_zero_page
);
103 if (!get_huge_zero_page())
106 if (test_and_set_bit(MMF_HUGE_ZERO_PAGE
, &mm
->flags
))
107 put_huge_zero_page();
109 return READ_ONCE(huge_zero_page
);
112 void mm_put_huge_zero_page(struct mm_struct
*mm
)
114 if (test_bit(MMF_HUGE_ZERO_PAGE
, &mm
->flags
))
115 put_huge_zero_page();
118 static unsigned long shrink_huge_zero_page_count(struct shrinker
*shrink
,
119 struct shrink_control
*sc
)
121 /* we can free zero page only if last reference remains */
122 return atomic_read(&huge_zero_refcount
) == 1 ? HPAGE_PMD_NR
: 0;
125 static unsigned long shrink_huge_zero_page_scan(struct shrinker
*shrink
,
126 struct shrink_control
*sc
)
128 if (atomic_cmpxchg(&huge_zero_refcount
, 1, 0) == 1) {
129 struct page
*zero_page
= xchg(&huge_zero_page
, NULL
);
130 BUG_ON(zero_page
== NULL
);
131 __free_pages(zero_page
, compound_order(zero_page
));
138 static struct shrinker huge_zero_page_shrinker
= {
139 .count_objects
= shrink_huge_zero_page_count
,
140 .scan_objects
= shrink_huge_zero_page_scan
,
141 .seeks
= DEFAULT_SEEKS
,
145 static ssize_t
enabled_show(struct kobject
*kobj
,
146 struct kobj_attribute
*attr
, char *buf
)
148 if (test_bit(TRANSPARENT_HUGEPAGE_FLAG
, &transparent_hugepage_flags
))
149 return sprintf(buf
, "[always] madvise never\n");
150 else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
, &transparent_hugepage_flags
))
151 return sprintf(buf
, "always [madvise] never\n");
153 return sprintf(buf
, "always madvise [never]\n");
156 static ssize_t
enabled_store(struct kobject
*kobj
,
157 struct kobj_attribute
*attr
,
158 const char *buf
, size_t count
)
162 if (!memcmp("always", buf
,
163 min(sizeof("always")-1, count
))) {
164 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
165 set_bit(TRANSPARENT_HUGEPAGE_FLAG
, &transparent_hugepage_flags
);
166 } else if (!memcmp("madvise", buf
,
167 min(sizeof("madvise")-1, count
))) {
168 clear_bit(TRANSPARENT_HUGEPAGE_FLAG
, &transparent_hugepage_flags
);
169 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
170 } else if (!memcmp("never", buf
,
171 min(sizeof("never")-1, count
))) {
172 clear_bit(TRANSPARENT_HUGEPAGE_FLAG
, &transparent_hugepage_flags
);
173 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
178 int err
= start_stop_khugepaged();
184 static struct kobj_attribute enabled_attr
=
185 __ATTR(enabled
, 0644, enabled_show
, enabled_store
);
187 ssize_t
single_hugepage_flag_show(struct kobject
*kobj
,
188 struct kobj_attribute
*attr
, char *buf
,
189 enum transparent_hugepage_flag flag
)
191 return sprintf(buf
, "%d\n",
192 !!test_bit(flag
, &transparent_hugepage_flags
));
195 ssize_t
single_hugepage_flag_store(struct kobject
*kobj
,
196 struct kobj_attribute
*attr
,
197 const char *buf
, size_t count
,
198 enum transparent_hugepage_flag flag
)
203 ret
= kstrtoul(buf
, 10, &value
);
210 set_bit(flag
, &transparent_hugepage_flags
);
212 clear_bit(flag
, &transparent_hugepage_flags
);
217 static ssize_t
defrag_show(struct kobject
*kobj
,
218 struct kobj_attribute
*attr
, char *buf
)
220 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
))
221 return sprintf(buf
, "[always] defer defer+madvise madvise never\n");
222 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
))
223 return sprintf(buf
, "always [defer] defer+madvise madvise never\n");
224 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
))
225 return sprintf(buf
, "always defer [defer+madvise] madvise never\n");
226 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
))
227 return sprintf(buf
, "always defer defer+madvise [madvise] never\n");
228 return sprintf(buf
, "always defer defer+madvise madvise [never]\n");
231 static ssize_t
defrag_store(struct kobject
*kobj
,
232 struct kobj_attribute
*attr
,
233 const char *buf
, size_t count
)
235 if (!memcmp("always", buf
,
236 min(sizeof("always")-1, count
))) {
237 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
);
238 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
);
239 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
240 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
);
241 } else if (!memcmp("defer", buf
,
242 min(sizeof("defer")-1, count
))) {
243 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
);
244 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
);
245 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
246 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
);
247 } else if (!memcmp("defer+madvise", buf
,
248 min(sizeof("defer+madvise")-1, count
))) {
249 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
);
250 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
);
251 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
252 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
);
253 } else if (!memcmp("madvise", buf
,
254 min(sizeof("madvise")-1, count
))) {
255 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
);
256 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
);
257 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
);
258 set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
259 } else if (!memcmp("never", buf
,
260 min(sizeof("never")-1, count
))) {
261 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
);
262 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
);
263 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
);
264 clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
);
270 static struct kobj_attribute defrag_attr
=
271 __ATTR(defrag
, 0644, defrag_show
, defrag_store
);
273 static ssize_t
use_zero_page_show(struct kobject
*kobj
,
274 struct kobj_attribute
*attr
, char *buf
)
276 return single_hugepage_flag_show(kobj
, attr
, buf
,
277 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG
);
279 static ssize_t
use_zero_page_store(struct kobject
*kobj
,
280 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
282 return single_hugepage_flag_store(kobj
, attr
, buf
, count
,
283 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG
);
285 static struct kobj_attribute use_zero_page_attr
=
286 __ATTR(use_zero_page
, 0644, use_zero_page_show
, use_zero_page_store
);
288 static ssize_t
hpage_pmd_size_show(struct kobject
*kobj
,
289 struct kobj_attribute
*attr
, char *buf
)
291 return sprintf(buf
, "%lu\n", HPAGE_PMD_SIZE
);
293 static struct kobj_attribute hpage_pmd_size_attr
=
294 __ATTR_RO(hpage_pmd_size
);
296 #ifdef CONFIG_DEBUG_VM
297 static ssize_t
debug_cow_show(struct kobject
*kobj
,
298 struct kobj_attribute
*attr
, char *buf
)
300 return single_hugepage_flag_show(kobj
, attr
, buf
,
301 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG
);
303 static ssize_t
debug_cow_store(struct kobject
*kobj
,
304 struct kobj_attribute
*attr
,
305 const char *buf
, size_t count
)
307 return single_hugepage_flag_store(kobj
, attr
, buf
, count
,
308 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG
);
310 static struct kobj_attribute debug_cow_attr
=
311 __ATTR(debug_cow
, 0644, debug_cow_show
, debug_cow_store
);
312 #endif /* CONFIG_DEBUG_VM */
314 static struct attribute
*hugepage_attr
[] = {
317 &use_zero_page_attr
.attr
,
318 &hpage_pmd_size_attr
.attr
,
319 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
320 &shmem_enabled_attr
.attr
,
322 #ifdef CONFIG_DEBUG_VM
323 &debug_cow_attr
.attr
,
328 static struct attribute_group hugepage_attr_group
= {
329 .attrs
= hugepage_attr
,
332 static int __init
hugepage_init_sysfs(struct kobject
**hugepage_kobj
)
336 *hugepage_kobj
= kobject_create_and_add("transparent_hugepage", mm_kobj
);
337 if (unlikely(!*hugepage_kobj
)) {
338 pr_err("failed to create transparent hugepage kobject\n");
342 err
= sysfs_create_group(*hugepage_kobj
, &hugepage_attr_group
);
344 pr_err("failed to register transparent hugepage group\n");
348 err
= sysfs_create_group(*hugepage_kobj
, &khugepaged_attr_group
);
350 pr_err("failed to register transparent hugepage group\n");
351 goto remove_hp_group
;
357 sysfs_remove_group(*hugepage_kobj
, &hugepage_attr_group
);
359 kobject_put(*hugepage_kobj
);
363 static void __init
hugepage_exit_sysfs(struct kobject
*hugepage_kobj
)
365 sysfs_remove_group(hugepage_kobj
, &khugepaged_attr_group
);
366 sysfs_remove_group(hugepage_kobj
, &hugepage_attr_group
);
367 kobject_put(hugepage_kobj
);
370 static inline int hugepage_init_sysfs(struct kobject
**hugepage_kobj
)
375 static inline void hugepage_exit_sysfs(struct kobject
*hugepage_kobj
)
378 #endif /* CONFIG_SYSFS */
380 static int __init
hugepage_init(void)
383 struct kobject
*hugepage_kobj
;
385 if (!has_transparent_hugepage()) {
386 transparent_hugepage_flags
= 0;
391 * hugepages can't be allocated by the buddy allocator
393 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER
>= MAX_ORDER
);
395 * we use page->mapping and page->index in second tail page
396 * as list_head: assuming THP order >= 2
398 MAYBE_BUILD_BUG_ON(HPAGE_PMD_ORDER
< 2);
400 err
= hugepage_init_sysfs(&hugepage_kobj
);
404 err
= khugepaged_init();
408 err
= register_shrinker(&huge_zero_page_shrinker
);
410 goto err_hzp_shrinker
;
411 err
= register_shrinker(&deferred_split_shrinker
);
413 goto err_split_shrinker
;
416 * By default disable transparent hugepages on smaller systems,
417 * where the extra memory used could hurt more than TLB overhead
418 * is likely to save. The admin can still enable it through /sys.
420 if (totalram_pages
< (512 << (20 - PAGE_SHIFT
))) {
421 transparent_hugepage_flags
= 0;
425 err
= start_stop_khugepaged();
431 unregister_shrinker(&deferred_split_shrinker
);
433 unregister_shrinker(&huge_zero_page_shrinker
);
435 khugepaged_destroy();
437 hugepage_exit_sysfs(hugepage_kobj
);
441 subsys_initcall(hugepage_init
);
443 static int __init
setup_transparent_hugepage(char *str
)
448 if (!strcmp(str
, "always")) {
449 set_bit(TRANSPARENT_HUGEPAGE_FLAG
,
450 &transparent_hugepage_flags
);
451 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
,
452 &transparent_hugepage_flags
);
454 } else if (!strcmp(str
, "madvise")) {
455 clear_bit(TRANSPARENT_HUGEPAGE_FLAG
,
456 &transparent_hugepage_flags
);
457 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
,
458 &transparent_hugepage_flags
);
460 } else if (!strcmp(str
, "never")) {
461 clear_bit(TRANSPARENT_HUGEPAGE_FLAG
,
462 &transparent_hugepage_flags
);
463 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
,
464 &transparent_hugepage_flags
);
469 pr_warn("transparent_hugepage= cannot parse, ignored\n");
472 __setup("transparent_hugepage=", setup_transparent_hugepage
);
474 pmd_t
maybe_pmd_mkwrite(pmd_t pmd
, struct vm_area_struct
*vma
)
476 if (likely(vma
->vm_flags
& VM_WRITE
))
477 pmd
= pmd_mkwrite(pmd
);
481 static inline struct list_head
*page_deferred_list(struct page
*page
)
484 * ->lru in the tail pages is occupied by compound_head.
485 * Let's use ->mapping + ->index in the second tail page as list_head.
487 return (struct list_head
*)&page
[2].mapping
;
490 void prep_transhuge_page(struct page
*page
)
493 * we use page->mapping and page->indexlru in second tail page
494 * as list_head: assuming THP order >= 2
497 INIT_LIST_HEAD(page_deferred_list(page
));
498 set_compound_page_dtor(page
, TRANSHUGE_PAGE_DTOR
);
501 unsigned long __thp_get_unmapped_area(struct file
*filp
, unsigned long len
,
502 loff_t off
, unsigned long flags
, unsigned long size
)
505 loff_t off_end
= off
+ len
;
506 loff_t off_align
= round_up(off
, size
);
507 unsigned long len_pad
;
509 if (off_end
<= off_align
|| (off_end
- off_align
) < size
)
512 len_pad
= len
+ size
;
513 if (len_pad
< len
|| (off
+ len_pad
) < off
)
516 addr
= current
->mm
->get_unmapped_area(filp
, 0, len_pad
,
517 off
>> PAGE_SHIFT
, flags
);
518 if (IS_ERR_VALUE(addr
))
521 addr
+= (off
- addr
) & (size
- 1);
525 unsigned long thp_get_unmapped_area(struct file
*filp
, unsigned long addr
,
526 unsigned long len
, unsigned long pgoff
, unsigned long flags
)
528 loff_t off
= (loff_t
)pgoff
<< PAGE_SHIFT
;
532 if (!IS_DAX(filp
->f_mapping
->host
) || !IS_ENABLED(CONFIG_FS_DAX_PMD
))
535 addr
= __thp_get_unmapped_area(filp
, len
, off
, flags
, PMD_SIZE
);
540 return current
->mm
->get_unmapped_area(filp
, addr
, len
, pgoff
, flags
);
542 EXPORT_SYMBOL_GPL(thp_get_unmapped_area
);
544 static int __do_huge_pmd_anonymous_page(struct vm_fault
*vmf
, struct page
*page
,
547 struct vm_area_struct
*vma
= vmf
->vma
;
548 struct mem_cgroup
*memcg
;
550 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
552 VM_BUG_ON_PAGE(!PageCompound(page
), page
);
554 if (mem_cgroup_try_charge(page
, vma
->vm_mm
, gfp
, &memcg
, true)) {
556 count_vm_event(THP_FAULT_FALLBACK
);
557 return VM_FAULT_FALLBACK
;
560 pgtable
= pte_alloc_one(vma
->vm_mm
, haddr
);
561 if (unlikely(!pgtable
)) {
562 mem_cgroup_cancel_charge(page
, memcg
, true);
567 clear_huge_page(page
, haddr
, HPAGE_PMD_NR
);
569 * The memory barrier inside __SetPageUptodate makes sure that
570 * clear_huge_page writes become visible before the set_pmd_at()
573 __SetPageUptodate(page
);
575 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
576 if (unlikely(!pmd_none(*vmf
->pmd
))) {
577 spin_unlock(vmf
->ptl
);
578 mem_cgroup_cancel_charge(page
, memcg
, true);
580 pte_free(vma
->vm_mm
, pgtable
);
584 /* Deliver the page fault to userland */
585 if (userfaultfd_missing(vma
)) {
588 spin_unlock(vmf
->ptl
);
589 mem_cgroup_cancel_charge(page
, memcg
, true);
591 pte_free(vma
->vm_mm
, pgtable
);
592 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
593 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
597 entry
= mk_huge_pmd(page
, vma
->vm_page_prot
);
598 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
599 page_add_new_anon_rmap(page
, vma
, haddr
, true);
600 mem_cgroup_commit_charge(page
, memcg
, false, true);
601 lru_cache_add_active_or_unevictable(page
, vma
);
602 pgtable_trans_huge_deposit(vma
->vm_mm
, vmf
->pmd
, pgtable
);
603 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
604 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
605 atomic_long_inc(&vma
->vm_mm
->nr_ptes
);
606 spin_unlock(vmf
->ptl
);
607 count_vm_event(THP_FAULT_ALLOC
);
614 * always: directly stall for all thp allocations
615 * defer: wake kswapd and fail if not immediately available
616 * defer+madvise: wake kswapd and directly stall for MADV_HUGEPAGE, otherwise
617 * fail if not immediately available
618 * madvise: directly stall for MADV_HUGEPAGE, otherwise fail if not immediately
620 * never: never stall for any thp allocation
622 static inline gfp_t
alloc_hugepage_direct_gfpmask(struct vm_area_struct
*vma
)
624 const bool vma_madvised
= !!(vma
->vm_flags
& VM_HUGEPAGE
);
626 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG
, &transparent_hugepage_flags
))
627 return GFP_TRANSHUGE
| (vma_madvised
? 0 : __GFP_NORETRY
);
628 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG
, &transparent_hugepage_flags
))
629 return GFP_TRANSHUGE_LIGHT
| __GFP_KSWAPD_RECLAIM
;
630 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG
, &transparent_hugepage_flags
))
631 return GFP_TRANSHUGE_LIGHT
| (vma_madvised
? __GFP_DIRECT_RECLAIM
:
632 __GFP_KSWAPD_RECLAIM
);
633 if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG
, &transparent_hugepage_flags
))
634 return GFP_TRANSHUGE_LIGHT
| (vma_madvised
? __GFP_DIRECT_RECLAIM
:
636 return GFP_TRANSHUGE_LIGHT
;
639 /* Caller must hold page table lock. */
640 static bool set_huge_zero_page(pgtable_t pgtable
, struct mm_struct
*mm
,
641 struct vm_area_struct
*vma
, unsigned long haddr
, pmd_t
*pmd
,
642 struct page
*zero_page
)
647 entry
= mk_pmd(zero_page
, vma
->vm_page_prot
);
648 entry
= pmd_mkhuge(entry
);
650 pgtable_trans_huge_deposit(mm
, pmd
, pgtable
);
651 set_pmd_at(mm
, haddr
, pmd
, entry
);
652 atomic_long_inc(&mm
->nr_ptes
);
656 int do_huge_pmd_anonymous_page(struct vm_fault
*vmf
)
658 struct vm_area_struct
*vma
= vmf
->vma
;
661 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
663 if (haddr
< vma
->vm_start
|| haddr
+ HPAGE_PMD_SIZE
> vma
->vm_end
)
664 return VM_FAULT_FALLBACK
;
665 if (unlikely(anon_vma_prepare(vma
)))
667 if (unlikely(khugepaged_enter(vma
, vma
->vm_flags
)))
669 if (!(vmf
->flags
& FAULT_FLAG_WRITE
) &&
670 !mm_forbids_zeropage(vma
->vm_mm
) &&
671 transparent_hugepage_use_zero_page()) {
673 struct page
*zero_page
;
676 pgtable
= pte_alloc_one(vma
->vm_mm
, haddr
);
677 if (unlikely(!pgtable
))
679 zero_page
= mm_get_huge_zero_page(vma
->vm_mm
);
680 if (unlikely(!zero_page
)) {
681 pte_free(vma
->vm_mm
, pgtable
);
682 count_vm_event(THP_FAULT_FALLBACK
);
683 return VM_FAULT_FALLBACK
;
685 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
688 if (pmd_none(*vmf
->pmd
)) {
689 if (userfaultfd_missing(vma
)) {
690 spin_unlock(vmf
->ptl
);
691 ret
= handle_userfault(vmf
, VM_UFFD_MISSING
);
692 VM_BUG_ON(ret
& VM_FAULT_FALLBACK
);
694 set_huge_zero_page(pgtable
, vma
->vm_mm
, vma
,
695 haddr
, vmf
->pmd
, zero_page
);
696 spin_unlock(vmf
->ptl
);
700 spin_unlock(vmf
->ptl
);
702 pte_free(vma
->vm_mm
, pgtable
);
705 gfp
= alloc_hugepage_direct_gfpmask(vma
);
706 page
= alloc_hugepage_vma(gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
707 if (unlikely(!page
)) {
708 count_vm_event(THP_FAULT_FALLBACK
);
709 return VM_FAULT_FALLBACK
;
711 prep_transhuge_page(page
);
712 return __do_huge_pmd_anonymous_page(vmf
, page
, gfp
);
715 static void insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
716 pmd_t
*pmd
, pfn_t pfn
, pgprot_t prot
, bool write
)
718 struct mm_struct
*mm
= vma
->vm_mm
;
722 ptl
= pmd_lock(mm
, pmd
);
723 entry
= pmd_mkhuge(pfn_t_pmd(pfn
, prot
));
724 if (pfn_t_devmap(pfn
))
725 entry
= pmd_mkdevmap(entry
);
727 entry
= pmd_mkyoung(pmd_mkdirty(entry
));
728 entry
= maybe_pmd_mkwrite(entry
, vma
);
730 set_pmd_at(mm
, addr
, pmd
, entry
);
731 update_mmu_cache_pmd(vma
, addr
, pmd
);
735 int vmf_insert_pfn_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
736 pmd_t
*pmd
, pfn_t pfn
, bool write
)
738 pgprot_t pgprot
= vma
->vm_page_prot
;
740 * If we had pmd_special, we could avoid all these restrictions,
741 * but we need to be consistent with PTEs and architectures that
742 * can't support a 'special' bit.
744 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)));
745 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
746 (VM_PFNMAP
|VM_MIXEDMAP
));
747 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
748 BUG_ON(!pfn_t_devmap(pfn
));
750 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
751 return VM_FAULT_SIGBUS
;
753 track_pfn_insert(vma
, &pgprot
, pfn
);
755 insert_pfn_pmd(vma
, addr
, pmd
, pfn
, pgprot
, write
);
756 return VM_FAULT_NOPAGE
;
758 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd
);
760 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
761 static pud_t
maybe_pud_mkwrite(pud_t pud
, struct vm_area_struct
*vma
)
763 if (likely(vma
->vm_flags
& VM_WRITE
))
764 pud
= pud_mkwrite(pud
);
768 static void insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
769 pud_t
*pud
, pfn_t pfn
, pgprot_t prot
, bool write
)
771 struct mm_struct
*mm
= vma
->vm_mm
;
775 ptl
= pud_lock(mm
, pud
);
776 entry
= pud_mkhuge(pfn_t_pud(pfn
, prot
));
777 if (pfn_t_devmap(pfn
))
778 entry
= pud_mkdevmap(entry
);
780 entry
= pud_mkyoung(pud_mkdirty(entry
));
781 entry
= maybe_pud_mkwrite(entry
, vma
);
783 set_pud_at(mm
, addr
, pud
, entry
);
784 update_mmu_cache_pud(vma
, addr
, pud
);
788 int vmf_insert_pfn_pud(struct vm_area_struct
*vma
, unsigned long addr
,
789 pud_t
*pud
, pfn_t pfn
, bool write
)
791 pgprot_t pgprot
= vma
->vm_page_prot
;
793 * If we had pud_special, we could avoid all these restrictions,
794 * but we need to be consistent with PTEs and architectures that
795 * can't support a 'special' bit.
797 BUG_ON(!(vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)));
798 BUG_ON((vma
->vm_flags
& (VM_PFNMAP
|VM_MIXEDMAP
)) ==
799 (VM_PFNMAP
|VM_MIXEDMAP
));
800 BUG_ON((vma
->vm_flags
& VM_PFNMAP
) && is_cow_mapping(vma
->vm_flags
));
801 BUG_ON(!pfn_t_devmap(pfn
));
803 if (addr
< vma
->vm_start
|| addr
>= vma
->vm_end
)
804 return VM_FAULT_SIGBUS
;
806 track_pfn_insert(vma
, &pgprot
, pfn
);
808 insert_pfn_pud(vma
, addr
, pud
, pfn
, pgprot
, write
);
809 return VM_FAULT_NOPAGE
;
811 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud
);
812 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
814 static void touch_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
820 * We should set the dirty bit only for FOLL_WRITE but for now
821 * the dirty bit in the pmd is meaningless. And if the dirty
822 * bit will become meaningful and we'll only set it with
823 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
824 * set the young bit, instead of the current set_pmd_at.
826 _pmd
= pmd_mkyoung(pmd_mkdirty(*pmd
));
827 if (pmdp_set_access_flags(vma
, addr
& HPAGE_PMD_MASK
,
829 update_mmu_cache_pmd(vma
, addr
, pmd
);
832 struct page
*follow_devmap_pmd(struct vm_area_struct
*vma
, unsigned long addr
,
833 pmd_t
*pmd
, int flags
)
835 unsigned long pfn
= pmd_pfn(*pmd
);
836 struct mm_struct
*mm
= vma
->vm_mm
;
837 struct dev_pagemap
*pgmap
;
840 assert_spin_locked(pmd_lockptr(mm
, pmd
));
843 * When we COW a devmap PMD entry, we split it into PTEs, so we should
844 * not be in this function with `flags & FOLL_COW` set.
846 WARN_ONCE(flags
& FOLL_COW
, "mm: In follow_devmap_pmd with FOLL_COW set");
848 if (flags
& FOLL_WRITE
&& !pmd_write(*pmd
))
851 if (pmd_present(*pmd
) && pmd_devmap(*pmd
))
856 if (flags
& FOLL_TOUCH
)
857 touch_pmd(vma
, addr
, pmd
);
860 * device mapped pages can only be returned if the
861 * caller will manage the page reference count.
863 if (!(flags
& FOLL_GET
))
864 return ERR_PTR(-EEXIST
);
866 pfn
+= (addr
& ~PMD_MASK
) >> PAGE_SHIFT
;
867 pgmap
= get_dev_pagemap(pfn
, NULL
);
869 return ERR_PTR(-EFAULT
);
870 page
= pfn_to_page(pfn
);
872 put_dev_pagemap(pgmap
);
877 int copy_huge_pmd(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
878 pmd_t
*dst_pmd
, pmd_t
*src_pmd
, unsigned long addr
,
879 struct vm_area_struct
*vma
)
881 spinlock_t
*dst_ptl
, *src_ptl
;
882 struct page
*src_page
;
884 pgtable_t pgtable
= NULL
;
887 /* Skip if can be re-fill on fault */
888 if (!vma_is_anonymous(vma
))
891 pgtable
= pte_alloc_one(dst_mm
, addr
);
892 if (unlikely(!pgtable
))
895 dst_ptl
= pmd_lock(dst_mm
, dst_pmd
);
896 src_ptl
= pmd_lockptr(src_mm
, src_pmd
);
897 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
901 if (unlikely(!pmd_trans_huge(pmd
))) {
902 pte_free(dst_mm
, pgtable
);
906 * When page table lock is held, the huge zero pmd should not be
907 * under splitting since we don't split the page itself, only pmd to
910 if (is_huge_zero_pmd(pmd
)) {
911 struct page
*zero_page
;
913 * get_huge_zero_page() will never allocate a new page here,
914 * since we already have a zero page to copy. It just takes a
917 zero_page
= mm_get_huge_zero_page(dst_mm
);
918 set_huge_zero_page(pgtable
, dst_mm
, vma
, addr
, dst_pmd
,
924 src_page
= pmd_page(pmd
);
925 VM_BUG_ON_PAGE(!PageHead(src_page
), src_page
);
927 page_dup_rmap(src_page
, true);
928 add_mm_counter(dst_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
929 atomic_long_inc(&dst_mm
->nr_ptes
);
930 pgtable_trans_huge_deposit(dst_mm
, dst_pmd
, pgtable
);
932 pmdp_set_wrprotect(src_mm
, addr
, src_pmd
);
933 pmd
= pmd_mkold(pmd_wrprotect(pmd
));
934 set_pmd_at(dst_mm
, addr
, dst_pmd
, pmd
);
938 spin_unlock(src_ptl
);
939 spin_unlock(dst_ptl
);
944 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
945 static void touch_pud(struct vm_area_struct
*vma
, unsigned long addr
,
951 * We should set the dirty bit only for FOLL_WRITE but for now
952 * the dirty bit in the pud is meaningless. And if the dirty
953 * bit will become meaningful and we'll only set it with
954 * FOLL_WRITE, an atomic set_bit will be required on the pud to
955 * set the young bit, instead of the current set_pud_at.
957 _pud
= pud_mkyoung(pud_mkdirty(*pud
));
958 if (pudp_set_access_flags(vma
, addr
& HPAGE_PUD_MASK
,
960 update_mmu_cache_pud(vma
, addr
, pud
);
963 struct page
*follow_devmap_pud(struct vm_area_struct
*vma
, unsigned long addr
,
964 pud_t
*pud
, int flags
)
966 unsigned long pfn
= pud_pfn(*pud
);
967 struct mm_struct
*mm
= vma
->vm_mm
;
968 struct dev_pagemap
*pgmap
;
971 assert_spin_locked(pud_lockptr(mm
, pud
));
973 if (flags
& FOLL_WRITE
&& !pud_write(*pud
))
976 if (pud_present(*pud
) && pud_devmap(*pud
))
981 if (flags
& FOLL_TOUCH
)
982 touch_pud(vma
, addr
, pud
);
985 * device mapped pages can only be returned if the
986 * caller will manage the page reference count.
988 if (!(flags
& FOLL_GET
))
989 return ERR_PTR(-EEXIST
);
991 pfn
+= (addr
& ~PUD_MASK
) >> PAGE_SHIFT
;
992 pgmap
= get_dev_pagemap(pfn
, NULL
);
994 return ERR_PTR(-EFAULT
);
995 page
= pfn_to_page(pfn
);
997 put_dev_pagemap(pgmap
);
1002 int copy_huge_pud(struct mm_struct
*dst_mm
, struct mm_struct
*src_mm
,
1003 pud_t
*dst_pud
, pud_t
*src_pud
, unsigned long addr
,
1004 struct vm_area_struct
*vma
)
1006 spinlock_t
*dst_ptl
, *src_ptl
;
1010 dst_ptl
= pud_lock(dst_mm
, dst_pud
);
1011 src_ptl
= pud_lockptr(src_mm
, src_pud
);
1012 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
1016 if (unlikely(!pud_trans_huge(pud
) && !pud_devmap(pud
)))
1020 * When page table lock is held, the huge zero pud should not be
1021 * under splitting since we don't split the page itself, only pud to
1024 if (is_huge_zero_pud(pud
)) {
1025 /* No huge zero pud yet */
1028 pudp_set_wrprotect(src_mm
, addr
, src_pud
);
1029 pud
= pud_mkold(pud_wrprotect(pud
));
1030 set_pud_at(dst_mm
, addr
, dst_pud
, pud
);
1034 spin_unlock(src_ptl
);
1035 spin_unlock(dst_ptl
);
1039 void huge_pud_set_accessed(struct vm_fault
*vmf
, pud_t orig_pud
)
1042 unsigned long haddr
;
1043 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1045 vmf
->ptl
= pud_lock(vmf
->vma
->vm_mm
, vmf
->pud
);
1046 if (unlikely(!pud_same(*vmf
->pud
, orig_pud
)))
1049 entry
= pud_mkyoung(orig_pud
);
1051 entry
= pud_mkdirty(entry
);
1052 haddr
= vmf
->address
& HPAGE_PUD_MASK
;
1053 if (pudp_set_access_flags(vmf
->vma
, haddr
, vmf
->pud
, entry
, write
))
1054 update_mmu_cache_pud(vmf
->vma
, vmf
->address
, vmf
->pud
);
1057 spin_unlock(vmf
->ptl
);
1059 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1061 void huge_pmd_set_accessed(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1064 unsigned long haddr
;
1065 bool write
= vmf
->flags
& FAULT_FLAG_WRITE
;
1067 vmf
->ptl
= pmd_lock(vmf
->vma
->vm_mm
, vmf
->pmd
);
1068 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1071 entry
= pmd_mkyoung(orig_pmd
);
1073 entry
= pmd_mkdirty(entry
);
1074 haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1075 if (pmdp_set_access_flags(vmf
->vma
, haddr
, vmf
->pmd
, entry
, write
))
1076 update_mmu_cache_pmd(vmf
->vma
, vmf
->address
, vmf
->pmd
);
1079 spin_unlock(vmf
->ptl
);
1082 static int do_huge_pmd_wp_page_fallback(struct vm_fault
*vmf
, pmd_t orig_pmd
,
1085 struct vm_area_struct
*vma
= vmf
->vma
;
1086 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1087 struct mem_cgroup
*memcg
;
1091 struct page
**pages
;
1092 unsigned long mmun_start
; /* For mmu_notifiers */
1093 unsigned long mmun_end
; /* For mmu_notifiers */
1095 pages
= kmalloc(sizeof(struct page
*) * HPAGE_PMD_NR
,
1097 if (unlikely(!pages
)) {
1098 ret
|= VM_FAULT_OOM
;
1102 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1103 pages
[i
] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE
, vma
,
1104 vmf
->address
, page_to_nid(page
));
1105 if (unlikely(!pages
[i
] ||
1106 mem_cgroup_try_charge(pages
[i
], vma
->vm_mm
,
1107 GFP_KERNEL
, &memcg
, false))) {
1111 memcg
= (void *)page_private(pages
[i
]);
1112 set_page_private(pages
[i
], 0);
1113 mem_cgroup_cancel_charge(pages
[i
], memcg
,
1118 ret
|= VM_FAULT_OOM
;
1121 set_page_private(pages
[i
], (unsigned long)memcg
);
1124 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1125 copy_user_highpage(pages
[i
], page
+ i
,
1126 haddr
+ PAGE_SIZE
* i
, vma
);
1127 __SetPageUptodate(pages
[i
]);
1132 mmun_end
= haddr
+ HPAGE_PMD_SIZE
;
1133 mmu_notifier_invalidate_range_start(vma
->vm_mm
, mmun_start
, mmun_end
);
1135 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1136 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1137 goto out_free_pages
;
1138 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1140 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1141 /* leave pmd empty until pte is filled */
1143 pgtable
= pgtable_trans_huge_withdraw(vma
->vm_mm
, vmf
->pmd
);
1144 pmd_populate(vma
->vm_mm
, &_pmd
, pgtable
);
1146 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
1148 entry
= mk_pte(pages
[i
], vma
->vm_page_prot
);
1149 entry
= maybe_mkwrite(pte_mkdirty(entry
), vma
);
1150 memcg
= (void *)page_private(pages
[i
]);
1151 set_page_private(pages
[i
], 0);
1152 page_add_new_anon_rmap(pages
[i
], vmf
->vma
, haddr
, false);
1153 mem_cgroup_commit_charge(pages
[i
], memcg
, false, false);
1154 lru_cache_add_active_or_unevictable(pages
[i
], vma
);
1155 vmf
->pte
= pte_offset_map(&_pmd
, haddr
);
1156 VM_BUG_ON(!pte_none(*vmf
->pte
));
1157 set_pte_at(vma
->vm_mm
, haddr
, vmf
->pte
, entry
);
1158 pte_unmap(vmf
->pte
);
1162 smp_wmb(); /* make pte visible before pmd */
1163 pmd_populate(vma
->vm_mm
, vmf
->pmd
, pgtable
);
1164 page_remove_rmap(page
, true);
1165 spin_unlock(vmf
->ptl
);
1167 mmu_notifier_invalidate_range_end(vma
->vm_mm
, mmun_start
, mmun_end
);
1169 ret
|= VM_FAULT_WRITE
;
1176 spin_unlock(vmf
->ptl
);
1177 mmu_notifier_invalidate_range_end(vma
->vm_mm
, mmun_start
, mmun_end
);
1178 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
1179 memcg
= (void *)page_private(pages
[i
]);
1180 set_page_private(pages
[i
], 0);
1181 mem_cgroup_cancel_charge(pages
[i
], memcg
, false);
1188 int do_huge_pmd_wp_page(struct vm_fault
*vmf
, pmd_t orig_pmd
)
1190 struct vm_area_struct
*vma
= vmf
->vma
;
1191 struct page
*page
= NULL
, *new_page
;
1192 struct mem_cgroup
*memcg
;
1193 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1194 unsigned long mmun_start
; /* For mmu_notifiers */
1195 unsigned long mmun_end
; /* For mmu_notifiers */
1196 gfp_t huge_gfp
; /* for allocation and charge */
1199 vmf
->ptl
= pmd_lockptr(vma
->vm_mm
, vmf
->pmd
);
1200 VM_BUG_ON_VMA(!vma
->anon_vma
, vma
);
1201 if (is_huge_zero_pmd(orig_pmd
))
1203 spin_lock(vmf
->ptl
);
1204 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
)))
1207 page
= pmd_page(orig_pmd
);
1208 VM_BUG_ON_PAGE(!PageCompound(page
) || !PageHead(page
), page
);
1210 * We can only reuse the page if nobody else maps the huge page or it's
1213 if (page_trans_huge_mapcount(page
, NULL
) == 1) {
1215 entry
= pmd_mkyoung(orig_pmd
);
1216 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1217 if (pmdp_set_access_flags(vma
, haddr
, vmf
->pmd
, entry
, 1))
1218 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1219 ret
|= VM_FAULT_WRITE
;
1223 spin_unlock(vmf
->ptl
);
1225 if (transparent_hugepage_enabled(vma
) &&
1226 !transparent_hugepage_debug_cow()) {
1227 huge_gfp
= alloc_hugepage_direct_gfpmask(vma
);
1228 new_page
= alloc_hugepage_vma(huge_gfp
, vma
, haddr
, HPAGE_PMD_ORDER
);
1232 if (likely(new_page
)) {
1233 prep_transhuge_page(new_page
);
1236 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1237 ret
|= VM_FAULT_FALLBACK
;
1239 ret
= do_huge_pmd_wp_page_fallback(vmf
, orig_pmd
, page
);
1240 if (ret
& VM_FAULT_OOM
) {
1241 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1242 ret
|= VM_FAULT_FALLBACK
;
1246 count_vm_event(THP_FAULT_FALLBACK
);
1250 if (unlikely(mem_cgroup_try_charge(new_page
, vma
->vm_mm
,
1251 huge_gfp
, &memcg
, true))) {
1253 split_huge_pmd(vma
, vmf
->pmd
, vmf
->address
);
1256 ret
|= VM_FAULT_FALLBACK
;
1257 count_vm_event(THP_FAULT_FALLBACK
);
1261 count_vm_event(THP_FAULT_ALLOC
);
1264 clear_huge_page(new_page
, haddr
, HPAGE_PMD_NR
);
1266 copy_user_huge_page(new_page
, page
, haddr
, vma
, HPAGE_PMD_NR
);
1267 __SetPageUptodate(new_page
);
1270 mmun_end
= haddr
+ HPAGE_PMD_SIZE
;
1271 mmu_notifier_invalidate_range_start(vma
->vm_mm
, mmun_start
, mmun_end
);
1273 spin_lock(vmf
->ptl
);
1276 if (unlikely(!pmd_same(*vmf
->pmd
, orig_pmd
))) {
1277 spin_unlock(vmf
->ptl
);
1278 mem_cgroup_cancel_charge(new_page
, memcg
, true);
1283 entry
= mk_huge_pmd(new_page
, vma
->vm_page_prot
);
1284 entry
= maybe_pmd_mkwrite(pmd_mkdirty(entry
), vma
);
1285 pmdp_huge_clear_flush_notify(vma
, haddr
, vmf
->pmd
);
1286 page_add_new_anon_rmap(new_page
, vma
, haddr
, true);
1287 mem_cgroup_commit_charge(new_page
, memcg
, false, true);
1288 lru_cache_add_active_or_unevictable(new_page
, vma
);
1289 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, entry
);
1290 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1292 add_mm_counter(vma
->vm_mm
, MM_ANONPAGES
, HPAGE_PMD_NR
);
1294 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1295 page_remove_rmap(page
, true);
1298 ret
|= VM_FAULT_WRITE
;
1300 spin_unlock(vmf
->ptl
);
1302 mmu_notifier_invalidate_range_end(vma
->vm_mm
, mmun_start
, mmun_end
);
1306 spin_unlock(vmf
->ptl
);
1311 * FOLL_FORCE can write to even unwritable pmd's, but only
1312 * after we've gone through a COW cycle and they are dirty.
1314 static inline bool can_follow_write_pmd(pmd_t pmd
, unsigned int flags
)
1316 return pmd_write(pmd
) ||
1317 ((flags
& FOLL_FORCE
) && (flags
& FOLL_COW
) && pmd_dirty(pmd
));
1320 struct page
*follow_trans_huge_pmd(struct vm_area_struct
*vma
,
1325 struct mm_struct
*mm
= vma
->vm_mm
;
1326 struct page
*page
= NULL
;
1328 assert_spin_locked(pmd_lockptr(mm
, pmd
));
1330 if (flags
& FOLL_WRITE
&& !can_follow_write_pmd(*pmd
, flags
))
1333 /* Avoid dumping huge zero page */
1334 if ((flags
& FOLL_DUMP
) && is_huge_zero_pmd(*pmd
))
1335 return ERR_PTR(-EFAULT
);
1337 /* Full NUMA hinting faults to serialise migration in fault paths */
1338 if ((flags
& FOLL_NUMA
) && pmd_protnone(*pmd
))
1341 page
= pmd_page(*pmd
);
1342 VM_BUG_ON_PAGE(!PageHead(page
) && !is_zone_device_page(page
), page
);
1343 if (flags
& FOLL_TOUCH
)
1344 touch_pmd(vma
, addr
, pmd
);
1345 if ((flags
& FOLL_MLOCK
) && (vma
->vm_flags
& VM_LOCKED
)) {
1347 * We don't mlock() pte-mapped THPs. This way we can avoid
1348 * leaking mlocked pages into non-VM_LOCKED VMAs.
1352 * In most cases the pmd is the only mapping of the page as we
1353 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1354 * writable private mappings in populate_vma_page_range().
1356 * The only scenario when we have the page shared here is if we
1357 * mlocking read-only mapping shared over fork(). We skip
1358 * mlocking such pages.
1362 * We can expect PageDoubleMap() to be stable under page lock:
1363 * for file pages we set it in page_add_file_rmap(), which
1364 * requires page to be locked.
1367 if (PageAnon(page
) && compound_mapcount(page
) != 1)
1369 if (PageDoubleMap(page
) || !page
->mapping
)
1371 if (!trylock_page(page
))
1374 if (page
->mapping
&& !PageDoubleMap(page
))
1375 mlock_vma_page(page
);
1379 page
+= (addr
& ~HPAGE_PMD_MASK
) >> PAGE_SHIFT
;
1380 VM_BUG_ON_PAGE(!PageCompound(page
) && !is_zone_device_page(page
), page
);
1381 if (flags
& FOLL_GET
)
1388 /* NUMA hinting page fault entry point for trans huge pmds */
1389 int do_huge_pmd_numa_page(struct vm_fault
*vmf
, pmd_t pmd
)
1391 struct vm_area_struct
*vma
= vmf
->vma
;
1392 struct anon_vma
*anon_vma
= NULL
;
1394 unsigned long haddr
= vmf
->address
& HPAGE_PMD_MASK
;
1395 int page_nid
= -1, this_nid
= numa_node_id();
1396 int target_nid
, last_cpupid
= -1;
1398 bool migrated
= false;
1402 vmf
->ptl
= pmd_lock(vma
->vm_mm
, vmf
->pmd
);
1403 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
)))
1407 * If there are potential migrations, wait for completion and retry
1408 * without disrupting NUMA hinting information. Do not relock and
1409 * check_same as the page may no longer be mapped.
1411 if (unlikely(pmd_trans_migrating(*vmf
->pmd
))) {
1412 page
= pmd_page(*vmf
->pmd
);
1413 spin_unlock(vmf
->ptl
);
1414 wait_on_page_locked(page
);
1418 page
= pmd_page(pmd
);
1419 BUG_ON(is_huge_zero_page(page
));
1420 page_nid
= page_to_nid(page
);
1421 last_cpupid
= page_cpupid_last(page
);
1422 count_vm_numa_event(NUMA_HINT_FAULTS
);
1423 if (page_nid
== this_nid
) {
1424 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL
);
1425 flags
|= TNF_FAULT_LOCAL
;
1428 /* See similar comment in do_numa_page for explanation */
1429 if (!pmd_savedwrite(pmd
))
1430 flags
|= TNF_NO_GROUP
;
1433 * Acquire the page lock to serialise THP migrations but avoid dropping
1434 * page_table_lock if at all possible
1436 page_locked
= trylock_page(page
);
1437 target_nid
= mpol_misplaced(page
, vma
, haddr
);
1438 if (target_nid
== -1) {
1439 /* If the page was locked, there are no parallel migrations */
1444 /* Migration could have started since the pmd_trans_migrating check */
1446 spin_unlock(vmf
->ptl
);
1447 wait_on_page_locked(page
);
1453 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1454 * to serialises splits
1457 spin_unlock(vmf
->ptl
);
1458 anon_vma
= page_lock_anon_vma_read(page
);
1460 /* Confirm the PMD did not change while page_table_lock was released */
1461 spin_lock(vmf
->ptl
);
1462 if (unlikely(!pmd_same(pmd
, *vmf
->pmd
))) {
1469 /* Bail if we fail to protect against THP splits for any reason */
1470 if (unlikely(!anon_vma
)) {
1477 * Migrate the THP to the requested node, returns with page unlocked
1478 * and access rights restored.
1480 spin_unlock(vmf
->ptl
);
1481 migrated
= migrate_misplaced_transhuge_page(vma
->vm_mm
, vma
,
1482 vmf
->pmd
, pmd
, vmf
->address
, page
, target_nid
);
1484 flags
|= TNF_MIGRATED
;
1485 page_nid
= target_nid
;
1487 flags
|= TNF_MIGRATE_FAIL
;
1491 BUG_ON(!PageLocked(page
));
1492 was_writable
= pmd_savedwrite(pmd
);
1493 pmd
= pmd_modify(pmd
, vma
->vm_page_prot
);
1494 pmd
= pmd_mkyoung(pmd
);
1496 pmd
= pmd_mkwrite(pmd
);
1497 set_pmd_at(vma
->vm_mm
, haddr
, vmf
->pmd
, pmd
);
1498 update_mmu_cache_pmd(vma
, vmf
->address
, vmf
->pmd
);
1501 spin_unlock(vmf
->ptl
);
1505 page_unlock_anon_vma_read(anon_vma
);
1508 task_numa_fault(last_cpupid
, page_nid
, HPAGE_PMD_NR
,
1515 * Return true if we do MADV_FREE successfully on entire pmd page.
1516 * Otherwise, return false.
1518 bool madvise_free_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1519 pmd_t
*pmd
, unsigned long addr
, unsigned long next
)
1524 struct mm_struct
*mm
= tlb
->mm
;
1527 tlb_remove_check_page_size_change(tlb
, HPAGE_PMD_SIZE
);
1529 ptl
= pmd_trans_huge_lock(pmd
, vma
);
1534 if (is_huge_zero_pmd(orig_pmd
))
1537 page
= pmd_page(orig_pmd
);
1539 * If other processes are mapping this page, we couldn't discard
1540 * the page unless they all do MADV_FREE so let's skip the page.
1542 if (page_mapcount(page
) != 1)
1545 if (!trylock_page(page
))
1549 * If user want to discard part-pages of THP, split it so MADV_FREE
1550 * will deactivate only them.
1552 if (next
- addr
!= HPAGE_PMD_SIZE
) {
1555 split_huge_page(page
);
1561 if (PageDirty(page
))
1562 ClearPageDirty(page
);
1565 if (PageActive(page
))
1566 deactivate_page(page
);
1568 if (pmd_young(orig_pmd
) || pmd_dirty(orig_pmd
)) {
1569 orig_pmd
= pmdp_huge_get_and_clear_full(tlb
->mm
, addr
, pmd
,
1571 orig_pmd
= pmd_mkold(orig_pmd
);
1572 orig_pmd
= pmd_mkclean(orig_pmd
);
1574 set_pmd_at(mm
, addr
, pmd
, orig_pmd
);
1575 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1584 static inline void zap_deposited_table(struct mm_struct
*mm
, pmd_t
*pmd
)
1588 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1589 pte_free(mm
, pgtable
);
1590 atomic_long_dec(&mm
->nr_ptes
);
1593 int zap_huge_pmd(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1594 pmd_t
*pmd
, unsigned long addr
)
1599 tlb_remove_check_page_size_change(tlb
, HPAGE_PMD_SIZE
);
1601 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1605 * For architectures like ppc64 we look at deposited pgtable
1606 * when calling pmdp_huge_get_and_clear. So do the
1607 * pgtable_trans_huge_withdraw after finishing pmdp related
1610 orig_pmd
= pmdp_huge_get_and_clear_full(tlb
->mm
, addr
, pmd
,
1612 tlb_remove_pmd_tlb_entry(tlb
, pmd
, addr
);
1613 if (vma_is_dax(vma
)) {
1615 if (is_huge_zero_pmd(orig_pmd
))
1616 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1617 } else if (is_huge_zero_pmd(orig_pmd
)) {
1618 pte_free(tlb
->mm
, pgtable_trans_huge_withdraw(tlb
->mm
, pmd
));
1619 atomic_long_dec(&tlb
->mm
->nr_ptes
);
1621 tlb_remove_page_size(tlb
, pmd_page(orig_pmd
), HPAGE_PMD_SIZE
);
1623 struct page
*page
= pmd_page(orig_pmd
);
1624 page_remove_rmap(page
, true);
1625 VM_BUG_ON_PAGE(page_mapcount(page
) < 0, page
);
1626 VM_BUG_ON_PAGE(!PageHead(page
), page
);
1627 if (PageAnon(page
)) {
1629 pgtable
= pgtable_trans_huge_withdraw(tlb
->mm
, pmd
);
1630 pte_free(tlb
->mm
, pgtable
);
1631 atomic_long_dec(&tlb
->mm
->nr_ptes
);
1632 add_mm_counter(tlb
->mm
, MM_ANONPAGES
, -HPAGE_PMD_NR
);
1634 if (arch_needs_pgtable_deposit())
1635 zap_deposited_table(tlb
->mm
, pmd
);
1636 add_mm_counter(tlb
->mm
, MM_FILEPAGES
, -HPAGE_PMD_NR
);
1639 tlb_remove_page_size(tlb
, page
, HPAGE_PMD_SIZE
);
1644 #ifndef pmd_move_must_withdraw
1645 static inline int pmd_move_must_withdraw(spinlock_t
*new_pmd_ptl
,
1646 spinlock_t
*old_pmd_ptl
,
1647 struct vm_area_struct
*vma
)
1650 * With split pmd lock we also need to move preallocated
1651 * PTE page table if new_pmd is on different PMD page table.
1653 * We also don't deposit and withdraw tables for file pages.
1655 return (new_pmd_ptl
!= old_pmd_ptl
) && vma_is_anonymous(vma
);
1659 bool move_huge_pmd(struct vm_area_struct
*vma
, unsigned long old_addr
,
1660 unsigned long new_addr
, unsigned long old_end
,
1661 pmd_t
*old_pmd
, pmd_t
*new_pmd
, bool *need_flush
)
1663 spinlock_t
*old_ptl
, *new_ptl
;
1665 struct mm_struct
*mm
= vma
->vm_mm
;
1666 bool force_flush
= false;
1668 if ((old_addr
& ~HPAGE_PMD_MASK
) ||
1669 (new_addr
& ~HPAGE_PMD_MASK
) ||
1670 old_end
- old_addr
< HPAGE_PMD_SIZE
)
1674 * The destination pmd shouldn't be established, free_pgtables()
1675 * should have release it.
1677 if (WARN_ON(!pmd_none(*new_pmd
))) {
1678 VM_BUG_ON(pmd_trans_huge(*new_pmd
));
1683 * We don't have to worry about the ordering of src and dst
1684 * ptlocks because exclusive mmap_sem prevents deadlock.
1686 old_ptl
= __pmd_trans_huge_lock(old_pmd
, vma
);
1688 new_ptl
= pmd_lockptr(mm
, new_pmd
);
1689 if (new_ptl
!= old_ptl
)
1690 spin_lock_nested(new_ptl
, SINGLE_DEPTH_NESTING
);
1691 pmd
= pmdp_huge_get_and_clear(mm
, old_addr
, old_pmd
);
1692 if (pmd_present(pmd
) && pmd_dirty(pmd
))
1694 VM_BUG_ON(!pmd_none(*new_pmd
));
1696 if (pmd_move_must_withdraw(new_ptl
, old_ptl
, vma
)) {
1698 pgtable
= pgtable_trans_huge_withdraw(mm
, old_pmd
);
1699 pgtable_trans_huge_deposit(mm
, new_pmd
, pgtable
);
1701 set_pmd_at(mm
, new_addr
, new_pmd
, pmd_mksoft_dirty(pmd
));
1702 if (new_ptl
!= old_ptl
)
1703 spin_unlock(new_ptl
);
1705 flush_tlb_range(vma
, old_addr
, old_addr
+ PMD_SIZE
);
1708 spin_unlock(old_ptl
);
1716 * - 0 if PMD could not be locked
1717 * - 1 if PMD was locked but protections unchange and TLB flush unnecessary
1718 * - HPAGE_PMD_NR is protections changed and TLB flush necessary
1720 int change_huge_pmd(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1721 unsigned long addr
, pgprot_t newprot
, int prot_numa
)
1723 struct mm_struct
*mm
= vma
->vm_mm
;
1727 ptl
= __pmd_trans_huge_lock(pmd
, vma
);
1730 bool preserve_write
= prot_numa
&& pmd_write(*pmd
);
1734 * Avoid trapping faults against the zero page. The read-only
1735 * data is likely to be read-cached on the local CPU and
1736 * local/remote hits to the zero page are not interesting.
1738 if (prot_numa
&& is_huge_zero_pmd(*pmd
)) {
1743 if (!prot_numa
|| !pmd_protnone(*pmd
)) {
1744 entry
= pmdp_huge_get_and_clear_notify(mm
, addr
, pmd
);
1745 entry
= pmd_modify(entry
, newprot
);
1747 entry
= pmd_mk_savedwrite(entry
);
1749 set_pmd_at(mm
, addr
, pmd
, entry
);
1750 BUG_ON(vma_is_anonymous(vma
) && !preserve_write
&&
1760 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1762 * Note that if it returns page table lock pointer, this routine returns without
1763 * unlocking page table lock. So callers must unlock it.
1765 spinlock_t
*__pmd_trans_huge_lock(pmd_t
*pmd
, struct vm_area_struct
*vma
)
1768 ptl
= pmd_lock(vma
->vm_mm
, pmd
);
1769 if (likely(pmd_trans_huge(*pmd
) || pmd_devmap(*pmd
)))
1776 * Returns true if a given pud maps a thp, false otherwise.
1778 * Note that if it returns true, this routine returns without unlocking page
1779 * table lock. So callers must unlock it.
1781 spinlock_t
*__pud_trans_huge_lock(pud_t
*pud
, struct vm_area_struct
*vma
)
1785 ptl
= pud_lock(vma
->vm_mm
, pud
);
1786 if (likely(pud_trans_huge(*pud
) || pud_devmap(*pud
)))
1792 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1793 int zap_huge_pud(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
1794 pud_t
*pud
, unsigned long addr
)
1799 ptl
= __pud_trans_huge_lock(pud
, vma
);
1803 * For architectures like ppc64 we look at deposited pgtable
1804 * when calling pudp_huge_get_and_clear. So do the
1805 * pgtable_trans_huge_withdraw after finishing pudp related
1808 orig_pud
= pudp_huge_get_and_clear_full(tlb
->mm
, addr
, pud
,
1810 tlb_remove_pud_tlb_entry(tlb
, pud
, addr
);
1811 if (vma_is_dax(vma
)) {
1813 /* No zero page support yet */
1815 /* No support for anonymous PUD pages yet */
1821 static void __split_huge_pud_locked(struct vm_area_struct
*vma
, pud_t
*pud
,
1822 unsigned long haddr
)
1824 VM_BUG_ON(haddr
& ~HPAGE_PUD_MASK
);
1825 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
1826 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PUD_SIZE
, vma
);
1827 VM_BUG_ON(!pud_trans_huge(*pud
) && !pud_devmap(*pud
));
1829 count_vm_event(THP_SPLIT_PMD
);
1831 pudp_huge_clear_flush_notify(vma
, haddr
, pud
);
1834 void __split_huge_pud(struct vm_area_struct
*vma
, pud_t
*pud
,
1835 unsigned long address
)
1838 struct mm_struct
*mm
= vma
->vm_mm
;
1839 unsigned long haddr
= address
& HPAGE_PUD_MASK
;
1841 mmu_notifier_invalidate_range_start(mm
, haddr
, haddr
+ HPAGE_PUD_SIZE
);
1842 ptl
= pud_lock(mm
, pud
);
1843 if (unlikely(!pud_trans_huge(*pud
) && !pud_devmap(*pud
)))
1845 __split_huge_pud_locked(vma
, pud
, haddr
);
1849 mmu_notifier_invalidate_range_end(mm
, haddr
, haddr
+ HPAGE_PUD_SIZE
);
1851 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
1853 static void __split_huge_zero_page_pmd(struct vm_area_struct
*vma
,
1854 unsigned long haddr
, pmd_t
*pmd
)
1856 struct mm_struct
*mm
= vma
->vm_mm
;
1861 /* leave pmd empty until pte is filled */
1862 pmdp_huge_clear_flush_notify(vma
, haddr
, pmd
);
1864 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1865 pmd_populate(mm
, &_pmd
, pgtable
);
1867 for (i
= 0; i
< HPAGE_PMD_NR
; i
++, haddr
+= PAGE_SIZE
) {
1869 entry
= pfn_pte(my_zero_pfn(haddr
), vma
->vm_page_prot
);
1870 entry
= pte_mkspecial(entry
);
1871 pte
= pte_offset_map(&_pmd
, haddr
);
1872 VM_BUG_ON(!pte_none(*pte
));
1873 set_pte_at(mm
, haddr
, pte
, entry
);
1876 smp_wmb(); /* make pte visible before pmd */
1877 pmd_populate(mm
, pmd
, pgtable
);
1880 static void __split_huge_pmd_locked(struct vm_area_struct
*vma
, pmd_t
*pmd
,
1881 unsigned long haddr
, bool freeze
)
1883 struct mm_struct
*mm
= vma
->vm_mm
;
1887 bool young
, write
, dirty
, soft_dirty
;
1891 VM_BUG_ON(haddr
& ~HPAGE_PMD_MASK
);
1892 VM_BUG_ON_VMA(vma
->vm_start
> haddr
, vma
);
1893 VM_BUG_ON_VMA(vma
->vm_end
< haddr
+ HPAGE_PMD_SIZE
, vma
);
1894 VM_BUG_ON(!pmd_trans_huge(*pmd
) && !pmd_devmap(*pmd
));
1896 count_vm_event(THP_SPLIT_PMD
);
1898 if (!vma_is_anonymous(vma
)) {
1899 _pmd
= pmdp_huge_clear_flush_notify(vma
, haddr
, pmd
);
1901 * We are going to unmap this huge page. So
1902 * just go ahead and zap it
1904 if (arch_needs_pgtable_deposit())
1905 zap_deposited_table(mm
, pmd
);
1906 if (vma_is_dax(vma
))
1908 page
= pmd_page(_pmd
);
1909 if (!PageReferenced(page
) && pmd_young(_pmd
))
1910 SetPageReferenced(page
);
1911 page_remove_rmap(page
, true);
1913 add_mm_counter(mm
, MM_FILEPAGES
, -HPAGE_PMD_NR
);
1915 } else if (is_huge_zero_pmd(*pmd
)) {
1916 return __split_huge_zero_page_pmd(vma
, haddr
, pmd
);
1919 page
= pmd_page(*pmd
);
1920 VM_BUG_ON_PAGE(!page_count(page
), page
);
1921 page_ref_add(page
, HPAGE_PMD_NR
- 1);
1922 write
= pmd_write(*pmd
);
1923 young
= pmd_young(*pmd
);
1924 dirty
= pmd_dirty(*pmd
);
1925 soft_dirty
= pmd_soft_dirty(*pmd
);
1927 pmdp_huge_split_prepare(vma
, haddr
, pmd
);
1928 pgtable
= pgtable_trans_huge_withdraw(mm
, pmd
);
1929 pmd_populate(mm
, &_pmd
, pgtable
);
1931 for (i
= 0, addr
= haddr
; i
< HPAGE_PMD_NR
; i
++, addr
+= PAGE_SIZE
) {
1934 * Note that NUMA hinting access restrictions are not
1935 * transferred to avoid any possibility of altering
1936 * permissions across VMAs.
1939 swp_entry_t swp_entry
;
1940 swp_entry
= make_migration_entry(page
+ i
, write
);
1941 entry
= swp_entry_to_pte(swp_entry
);
1943 entry
= pte_swp_mksoft_dirty(entry
);
1945 entry
= mk_pte(page
+ i
, READ_ONCE(vma
->vm_page_prot
));
1946 entry
= maybe_mkwrite(entry
, vma
);
1948 entry
= pte_wrprotect(entry
);
1950 entry
= pte_mkold(entry
);
1952 entry
= pte_mksoft_dirty(entry
);
1955 SetPageDirty(page
+ i
);
1956 pte
= pte_offset_map(&_pmd
, addr
);
1957 BUG_ON(!pte_none(*pte
));
1958 set_pte_at(mm
, addr
, pte
, entry
);
1959 atomic_inc(&page
[i
]._mapcount
);
1964 * Set PG_double_map before dropping compound_mapcount to avoid
1965 * false-negative page_mapped().
1967 if (compound_mapcount(page
) > 1 && !TestSetPageDoubleMap(page
)) {
1968 for (i
= 0; i
< HPAGE_PMD_NR
; i
++)
1969 atomic_inc(&page
[i
]._mapcount
);
1972 if (atomic_add_negative(-1, compound_mapcount_ptr(page
))) {
1973 /* Last compound_mapcount is gone. */
1974 __dec_node_page_state(page
, NR_ANON_THPS
);
1975 if (TestClearPageDoubleMap(page
)) {
1976 /* No need in mapcount reference anymore */
1977 for (i
= 0; i
< HPAGE_PMD_NR
; i
++)
1978 atomic_dec(&page
[i
]._mapcount
);
1982 smp_wmb(); /* make pte visible before pmd */
1984 * Up to this point the pmd is present and huge and userland has the
1985 * whole access to the hugepage during the split (which happens in
1986 * place). If we overwrite the pmd with the not-huge version pointing
1987 * to the pte here (which of course we could if all CPUs were bug
1988 * free), userland could trigger a small page size TLB miss on the
1989 * small sized TLB while the hugepage TLB entry is still established in
1990 * the huge TLB. Some CPU doesn't like that.
1991 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
1992 * 383 on page 93. Intel should be safe but is also warns that it's
1993 * only safe if the permission and cache attributes of the two entries
1994 * loaded in the two TLB is identical (which should be the case here).
1995 * But it is generally safer to never allow small and huge TLB entries
1996 * for the same virtual address to be loaded simultaneously. So instead
1997 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
1998 * current pmd notpresent (atomically because here the pmd_trans_huge
1999 * and pmd_trans_splitting must remain set at all times on the pmd
2000 * until the split is complete for this pmd), then we flush the SMP TLB
2001 * and finally we write the non-huge version of the pmd entry with
2004 pmdp_invalidate(vma
, haddr
, pmd
);
2005 pmd_populate(mm
, pmd
, pgtable
);
2008 for (i
= 0; i
< HPAGE_PMD_NR
; i
++) {
2009 page_remove_rmap(page
+ i
, false);
2015 void __split_huge_pmd(struct vm_area_struct
*vma
, pmd_t
*pmd
,
2016 unsigned long address
, bool freeze
, struct page
*page
)
2019 struct mm_struct
*mm
= vma
->vm_mm
;
2020 unsigned long haddr
= address
& HPAGE_PMD_MASK
;
2022 mmu_notifier_invalidate_range_start(mm
, haddr
, haddr
+ HPAGE_PMD_SIZE
);
2023 ptl
= pmd_lock(mm
, pmd
);
2026 * If caller asks to setup a migration entries, we need a page to check
2027 * pmd against. Otherwise we can end up replacing wrong page.
2029 VM_BUG_ON(freeze
&& !page
);
2030 if (page
&& page
!= pmd_page(*pmd
))
2033 if (pmd_trans_huge(*pmd
)) {
2034 page
= pmd_page(*pmd
);
2035 if (PageMlocked(page
))
2036 clear_page_mlock(page
);
2037 } else if (!pmd_devmap(*pmd
))
2039 __split_huge_pmd_locked(vma
, pmd
, haddr
, freeze
);
2042 mmu_notifier_invalidate_range_end(mm
, haddr
, haddr
+ HPAGE_PMD_SIZE
);
2045 void split_huge_pmd_address(struct vm_area_struct
*vma
, unsigned long address
,
2046 bool freeze
, struct page
*page
)
2052 pgd
= pgd_offset(vma
->vm_mm
, address
);
2053 if (!pgd_present(*pgd
))
2056 pud
= pud_offset(pgd
, address
);
2057 if (!pud_present(*pud
))
2060 pmd
= pmd_offset(pud
, address
);
2062 __split_huge_pmd(vma
, pmd
, address
, freeze
, page
);

void vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split a huge
	 * pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, start, false, NULL);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split a huge
	 * pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_pmd_address(vma, end, false, NULL);

	/*
	 * If we're also updating the vma->vm_next->vm_start, and if the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_pmd_address(next, nstart, false, NULL);
	}
}
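
/*
 * Worked example for the alignment checks above, assuming (for
 * illustration) 2MB huge pages, i.e. HPAGE_PMD_SIZE == 0x200000:
 * for start == 0x40100000, start & ~HPAGE_PMD_MASK == 0x100000, so
 * start is not hpage aligned; the containing huge range is
 * [0x40000000, 0x40200000), and only if that whole range lies inside
 * the vma could a huge pmd have been mapped there, hence the
 * vm_start/vm_end bounds checks before split_huge_pmd_address().
 */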

static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
	int ret;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	if (PageAnon(page))
		ttu_flags |= TTU_MIGRATION;

	ret = try_to_unmap(page, ttu_flags);
	VM_BUG_ON_PAGE(ret, page);
}

static void unfreeze_page(struct page *page)
{
	int i;

	if (PageTransHuge(page)) {
		remove_migration_ptes(page, page, true);
	} else {
		for (i = 0; i < HPAGE_PMD_NR; i++)
			remove_migration_ptes(page + i, page + i, true);
	}
}

static void __split_huge_page_tail(struct page *head, int tail,
		struct lruvec *lruvec, struct list_head *list)
{
	struct page *page_tail = head + tail;

	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
	VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);

	/*
	 * tail_page->_refcount is zero and not changing from under us. But
	 * get_page_unless_zero() may be running from under us on the
	 * tail_page. If we used atomic_set() below instead of atomic_inc() or
	 * atomic_add(), we would then run atomic_set() concurrently with
	 * get_page_unless_zero(), and atomic_set() is implemented in C not
	 * using locked ops. spin_unlock on x86 sometimes uses locked ops
	 * because of PPro errata 66, 92, so unless somebody can guarantee
	 * atomic_set() here would be safe on all archs (and not only on x86),
	 * it's safer to use atomic_inc()/atomic_add().
	 */
	if (PageAnon(head)) {
		page_ref_inc(page_tail);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(page_tail, 2);
	}

	page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	page_tail->flags |= (head->flags &
			((1L << PG_referenced) |
			 (1L << PG_swapbacked) |
			 (1L << PG_mlocked) |
			 (1L << PG_uptodate) |
			 (1L << PG_active) |
			 (1L << PG_locked) |
			 (1L << PG_unevictable) |
			 (1L << PG_dirty)));

	/*
	 * After clearing PageTail the gup refcount can be released.
	 * Page flags also must be visible before we make the page
	 * non-compound.
	 */
	smp_wmb();

	clear_compound_head(page_tail);

	if (page_is_young(head))
		set_page_young(page_tail);
	if (page_is_idle(head))
		set_page_idle(page_tail);

	/* ->mapping in first tail page is compound_mapcount */
	VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
			page_tail);
	page_tail->mapping = head->mapping;

	page_tail->index = head->index + tail;
	page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
	lru_add_page_tail(head, page_tail, lruvec, list);
}

static void __split_huge_page(struct page *page, struct list_head *list,
		unsigned long flags)
{
	struct page *head = compound_head(page);
	struct zone *zone = page_zone(head);
	struct lruvec *lruvec;
	pgoff_t end = -1;
	int i;

	lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);

	/* complete memcg works before adding pages to LRU */
	mem_cgroup_split_huge_fixup(head);

	if (!PageAnon(page))
		end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		__split_huge_page_tail(head, i, lruvec, list);
		/* Some pages can be beyond i_size: drop them from page cache */
		if (head[i].index >= end) {
			__ClearPageDirty(head + i);
			__delete_from_page_cache(head + i, NULL);
			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
				shmem_uncharge(head->mapping->host, 1);
			put_page(head + i);
		}
	}

	ClearPageCompound(head);
	/* See comment in __split_huge_page_tail() */
	if (PageAnon(head)) {
		page_ref_inc(head);
	} else {
		/* Additional pin to radix tree */
		page_ref_add(head, 2);
		spin_unlock(&head->mapping->tree_lock);
	}

	spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);

	unfreeze_page(head);

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		struct page *subpage = head + i;

		if (subpage == page)
			continue;
		unlock_page(subpage);

		/*
		 * Subpages may be freed if there wasn't any mapping,
		 * like if add_to_swap() is running on an lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock, so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(subpage);
	}
}

int total_mapcount(struct page *page)
{
	int i, compound, ret;

	VM_BUG_ON_PAGE(PageTail(page), page);

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) + 1;

	compound = compound_mapcount(page);
	if (PageHuge(page))
		return compound;
	ret = compound;
	for (i = 0; i < HPAGE_PMD_NR; i++)
		ret += atomic_read(&page[i]._mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!PageAnon(page))
		return ret - compound * HPAGE_PMD_NR;
	if (PageDoubleMap(page))
		ret -= HPAGE_PMD_NR;
	return ret;
}
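
/*
 * Example of the arithmetic above: an anon THP mapped by a single PMD
 * only has compound_mapcount() == 1 while every subpage's _mapcount is
 * -1, so each loop iteration adds zero and total_mapcount() returns 1.
 */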

/*
 * This calculates accurately how many mappings a transparent hugepage
 * has (unlike page_mapcount() which isn't fully accurate). This full
 * accuracy is primarily needed to know if copy-on-write faults can
 * reuse the page and change the mapping to read-write instead of
 * copying it. At the same time this returns the total_mapcount too.
 *
 * The function returns the highest mapcount any one of the subpages
 * has. If the return value is one, even if different processes are
 * mapping different subpages of the transparent hugepage, they can
 * all reuse it, because each process is reusing a different subpage.
 *
 * The total_mapcount is instead counting all virtual mappings of the
 * subpages. If the total_mapcount is equal to "one", it tells the
 * caller all mappings belong to the same "mm" and in turn the
 * anon_vma of the transparent hugepage can become the vma->anon_vma
 * local one, as no other process may be mapping any of the subpages.
 *
 * It would be more accurate to replace page_mapcount() with
 * page_trans_huge_mapcount(), however we only use
 * page_trans_huge_mapcount() in the copy-on-write faults where we
 * need full accuracy to avoid breaking page pinning, because
 * page_trans_huge_mapcount() is slower than page_mapcount().
 */
int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
{
	int i, ret, _total_mapcount, mapcount;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (likely(!PageTransCompound(page))) {
		mapcount = atomic_read(&page->_mapcount) + 1;
		if (total_mapcount)
			*total_mapcount = mapcount;
		return mapcount;
	}

	page = compound_head(page);

	_total_mapcount = ret = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		ret = max(ret, mapcount);
		_total_mapcount += mapcount;
	}
	if (PageDoubleMap(page)) {
		ret -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	ret += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	return ret;
}
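
/*
 * Example use (sketch of the copy-on-write reuse test described in the
 * comment above; the huge-pmd write fault handler does effectively
 * this):
 *
 *	if (page_trans_huge_mapcount(page, NULL) == 1)
 *		reuse the page read-write instead of copying it;
 */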

/*
 * This function splits a huge page into normal pages. @page can point to
 * any subpage of the huge page to split. The split doesn't change the
 * position of @page.
 *
 * Only the caller may hold a pin on @page; otherwise the split fails with
 * -EBUSY. The huge page must be locked.
 *
 * If @list is null, tail pages will be added to the LRU list, otherwise
 * to @list.
 *
 * Both the head page and the tail pages will inherit mapping, flags, and
 * so on from the hugepage.
 *
 * The GUP pin and PG_locked are transferred to @page. The rest of the
 * subpages can be freed if they are not mapped.
 *
 * Returns 0 if the hugepage was split successfully.
 * Returns -EBUSY if the page is pinned or if the anon_vma disappeared
 * from under us.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct page *head = compound_head(page);
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
	struct anon_vma *anon_vma = NULL;
	struct address_space *mapping = NULL;
	int count, mapcount, extra_pins, ret;
	bool mlocked;
	unsigned long flags;

	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (PageAnon(head)) {
		/*
		 * The caller does not necessarily hold an mmap_sem that would
		 * prevent the anon_vma disappearing, so we first take a
		 * reference to it and then lock the anon_vma for write. This
		 * is similar to page_lock_anon_vma_read except the write lock
		 * is taken to serialise against parallel split or collapse
		 * operations.
		 */
		anon_vma = page_get_anon_vma(head);
		if (!anon_vma) {
			ret = -EBUSY;
			goto out;
		}
		extra_pins = 0;
		mapping = NULL;
		anon_vma_lock_write(anon_vma);
	} else {
		mapping = head->mapping;

		/* Truncated ? */
		if (!mapping) {
			ret = -EBUSY;
			goto out;
		}

		/* Additional pins from radix tree */
		extra_pins = HPAGE_PMD_NR;
		anon_vma = NULL;
		i_mmap_lock_read(mapping);
	}

	/*
	 * Racy check if we can split the page, before freeze_page() will
	 * split PMDs
	 */
	if (total_mapcount(head) != page_count(head) - extra_pins - 1) {
		ret = -EBUSY;
		goto out_unlock;
	}

	mlocked = PageMlocked(page);
	freeze_page(head);
	VM_BUG_ON_PAGE(compound_mapcount(head), head);

	/* Make sure the page is not on per-CPU pagevec as it takes pin */
	if (mlocked)
		lru_add_drain();

	/* prevent PageLRU to go away from under us, and freeze lru stats */
	spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

	if (mapping) {
		void **pslot;

		spin_lock(&mapping->tree_lock);
		pslot = radix_tree_lookup_slot(&mapping->page_tree,
				page_index(head));
		/*
		 * Check if the head page is present in radix tree.
		 * We assume all tail are present too, if head is there.
		 */
		if (radix_tree_deref_slot_protected(pslot,
					&mapping->tree_lock) != head)
			goto fail;
	}

	/* Prevent deferred_split_scan() touching ->_refcount */
	spin_lock(&pgdata->split_queue_lock);
	count = page_count(head);
	mapcount = total_mapcount(head);
	if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
		if (!list_empty(page_deferred_list(head))) {
			pgdata->split_queue_len--;
			list_del(page_deferred_list(head));
		}
		if (mapping)
			__dec_node_page_state(page, NR_SHMEM_THPS);
		spin_unlock(&pgdata->split_queue_lock);
		__split_huge_page(page, list, flags);
		ret = 0;
	} else {
		if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
			pr_alert("total_mapcount: %u, page_count(): %u\n",
					mapcount, count);
			if (PageTail(page))
				dump_page(head, NULL);
			dump_page(page, "total_mapcount(head) > 0");
			BUG();
		}
		spin_unlock(&pgdata->split_queue_lock);
fail:		if (mapping)
			spin_unlock(&mapping->tree_lock);
		spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
		unfreeze_page(head);
		ret = -EBUSY;
	}

out_unlock:
	if (anon_vma) {
		anon_vma_unlock_write(anon_vma);
		put_anon_vma(anon_vma);
	}
	if (mapping)
		i_mmap_unlock_read(mapping);
out:
	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
	return ret;
}
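
/*
 * Minimal caller sketch (illustrative; split_huge_page() is the
 * list == NULL wrapper around this function):
 *
 *	get_page(page);
 *	lock_page(page);
 *	if (!split_huge_page(page))
 *		... @page is now a small page, tails went to the LRU ...
 *	unlock_page(page);
 *	put_page(page);
 */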

void free_transhuge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (!list_empty(page_deferred_list(page))) {
		pgdata->split_queue_len--;
		list_del(page_deferred_list(page));
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
	free_compound_page(page);
}

void deferred_split_huge_page(struct page *page)
{
	struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageTransHuge(page), page);

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	if (list_empty(page_deferred_list(page))) {
		count_vm_event(THP_DEFERRED_SPLIT_PAGE);
		list_add_tail(page_deferred_list(page), &pgdata->split_queue);
		pgdata->split_queue_len++;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
}
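
/*
 * Illustrative note: the typical caller is the rmap code, e.g. when a
 * partial munmap() leaves a THP with pte mappings but no pmd mapping,
 * page_remove_rmap() queues it here so that the shrinker below can
 * split it and reclaim the unused subpages under memory pressure.
 */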

static unsigned long deferred_split_count(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	return ACCESS_ONCE(pgdata->split_queue_len);
}

static unsigned long deferred_split_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	struct pglist_data *pgdata = NODE_DATA(sc->nid);
	unsigned long flags;
	LIST_HEAD(list), *pos, *next;
	struct page *page;
	int split = 0;

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	/* Take pin on all head pages to avoid freeing them under us */
	list_for_each_safe(pos, next, &pgdata->split_queue) {
		page = list_entry((void *)pos, struct page, mapping);
		page = compound_head(page);
		if (get_page_unless_zero(page)) {
			list_move(page_deferred_list(page), &list);
		} else {
			/* We lost race with put_compound_page() */
			list_del_init(page_deferred_list(page));
			pgdata->split_queue_len--;
		}
		if (!--sc->nr_to_scan)
			break;
	}
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	list_for_each_safe(pos, next, &list) {
		page = list_entry((void *)pos, struct page, mapping);
		lock_page(page);
		/* split_huge_page() removes page from list on success */
		if (!split_huge_page(page))
			split++;
		unlock_page(page);
		put_page(page);
	}

	spin_lock_irqsave(&pgdata->split_queue_lock, flags);
	list_splice_tail(&list, &pgdata->split_queue);
	spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);

	/*
	 * Stop the shrinker if we didn't split any page and the queue is
	 * empty. This can happen if pages were freed under us.
	 */
	if (!split && list_empty(&pgdata->split_queue))
		return SHRINK_STOP;
	return split;
}

static struct shrinker deferred_split_shrinker = {
	.count_objects = deferred_split_count,
	.scan_objects = deferred_split_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
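
/*
 * Note: this shrinker is registered with register_shrinker() from the
 * THP init path in this file (hugepage_init()), so deferred splits are
 * driven per-node by memory pressure.
 */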

#ifdef CONFIG_DEBUG_FS
static int split_huge_pages_set(void *data, u64 val)
{
	struct zone *zone;
	struct page *page;
	unsigned long pfn, max_zone_pfn;
	unsigned long total = 0, split = 0;

	if (val != 1)
		return -EINVAL;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);
			if (!get_page_unless_zero(page))
				continue;

			if (zone != page_zone(page))
				goto next;

			if (!PageHead(page) || PageHuge(page) || !PageLRU(page))
				goto next;

			total++;
			lock_page(page);
			if (!split_huge_page(page))
				split++;
			unlock_page(page);
next:
			put_page(page);
		}
	}

	pr_info("%lu of %lu THP split\n", split, total);

	return 0;
}
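
/*
 * Usage sketch: writing 1 to the debugfs file scans every populated
 * zone and tries to split each THP found on the LRU:
 *
 *	# echo 1 > /sys/kernel/debug/split_huge_pages
 *
 * The "%lu of %lu THP split" line above then reports the result.
 */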
DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
		"%llu\n");

static int __init split_huge_pages_debugfs(void)
{
	void *ret;

	ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
			&split_huge_pages_fops);
	if (!ret)
		pr_warn("Failed to create split_huge_pages in debugfs");

	return 0;
}
late_initcall(split_huge_pages_debugfs);
#endif