mm/huge_memory.c
1/*
2 * Copyright (C) 2009 Red Hat, Inc.
3 *
4 * This work is licensed under the terms of the GNU GPL, version 2. See
5 * the COPYING file in the top-level directory.
6 */
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10#include <linux/mm.h>
11#include <linux/sched.h>
12#include <linux/highmem.h>
13#include <linux/hugetlb.h>
14#include <linux/mmu_notifier.h>
15#include <linux/rmap.h>
16#include <linux/swap.h>
17#include <linux/shrinker.h>
18#include <linux/mm_inline.h>
19#include <linux/swapops.h>
20#include <linux/dax.h>
21#include <linux/kthread.h>
22#include <linux/khugepaged.h>
23#include <linux/freezer.h>
24#include <linux/mman.h>
25#include <linux/pagemap.h>
26#include <linux/debugfs.h>
27#include <linux/migrate.h>
28#include <linux/hashtable.h>
29#include <linux/userfaultfd_k.h>
30#include <linux/page_idle.h>
31
32#include <asm/tlb.h>
33#include <asm/pgalloc.h>
34#include "internal.h"
35
36enum scan_result {
37 SCAN_FAIL,
38 SCAN_SUCCEED,
39 SCAN_PMD_NULL,
40 SCAN_EXCEED_NONE_PTE,
41 SCAN_PTE_NON_PRESENT,
42 SCAN_PAGE_RO,
43 SCAN_NO_REFERENCED_PAGE,
44 SCAN_PAGE_NULL,
45 SCAN_SCAN_ABORT,
46 SCAN_PAGE_COUNT,
47 SCAN_PAGE_LRU,
48 SCAN_PAGE_LOCK,
49 SCAN_PAGE_ANON,
50 SCAN_PAGE_COMPOUND,
51 SCAN_ANY_PROCESS,
52 SCAN_VMA_NULL,
53 SCAN_VMA_CHECK,
54 SCAN_ADDRESS_RANGE,
55 SCAN_SWAP_CACHE_PAGE,
56 SCAN_DEL_PAGE_LRU,
57 SCAN_ALLOC_HUGE_PAGE_FAIL,
58 SCAN_CGROUP_CHARGE_FAIL
59};
60
61#define CREATE_TRACE_POINTS
62#include <trace/events/huge_memory.h>
63
64/*
65 * By default, transparent hugepage support is disabled in order to avoid
66 * the risk of increasing the memory footprint of applications without a
67 * guaranteed benefit. When transparent hugepage support is enabled, it is
68 * used for all mappings, and khugepaged scans all mappings.
69 * Defrag is invoked by khugepaged hugepage allocations and by page faults
70 * for all hugepage allocations.
71 */
72unsigned long transparent_hugepage_flags __read_mostly =
73#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
74 (1<<TRANSPARENT_HUGEPAGE_FLAG)|
75#endif
76#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
77 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
78#endif
79 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
80 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
81 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
82
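/*
 * Editorial illustration (not part of this file): the flag word above is what
 * backs the "enabled" and "defrag" files under
 * /sys/kernel/mm/transparent_hugepage/.  A minimal userspace sketch that
 * reads the current policy back could look like this; the sysfs paths are the
 * standard ones, everything else is made up for the example.
 */
#include <stdio.h>

static void show_thp_knob(const char *name)
{
	char path[128], line[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/%s", name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	/* output looks like "[always] madvise never"; brackets mark the policy */
	if (fgets(line, sizeof(line), f))
		printf("%s: %s", name, line);
	fclose(f);
}

int main(void)
{
	show_thp_knob("enabled");
	show_thp_knob("defrag");
	return 0;
}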
83/* default scan 8*512 ptes (or vmas) every 30 seconds */
84static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
85static unsigned int khugepaged_pages_collapsed;
86static unsigned int khugepaged_full_scans;
87static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
88/* during fragmentation poll the hugepage allocator once every minute */
89static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
90static struct task_struct *khugepaged_thread __read_mostly;
91static DEFINE_MUTEX(khugepaged_mutex);
92static DEFINE_SPINLOCK(khugepaged_mm_lock);
93static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
94/*
95 * By default, collapse hugepages if there is at least one pte mapped as
96 * it would have been mapped had the vma been large enough during the page
97 * fault.
98 */
99static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
100
101static int khugepaged(void *none);
102static int khugepaged_slab_init(void);
103static void khugepaged_slab_exit(void);
104
105#define MM_SLOTS_HASH_BITS 10
106static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
107
108static struct kmem_cache *mm_slot_cache __read_mostly;
109
110/**
111 * struct mm_slot - hash lookup from mm to mm_slot
112 * @hash: hash collision list
113 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
114 * @mm: the mm that this information is valid for
115 */
116struct mm_slot {
117 struct hlist_node hash;
118 struct list_head mm_node;
119 struct mm_struct *mm;
120};
121
122/**
123 * struct khugepaged_scan - cursor for scanning
124 * @mm_head: the head of the mm list to scan
125 * @mm_slot: the current mm_slot we are scanning
126 * @address: the next address inside that to be scanned
127 *
128 * There is only the one khugepaged_scan instance of this cursor structure.
129 */
130struct khugepaged_scan {
131 struct list_head mm_head;
132 struct mm_slot *mm_slot;
133 unsigned long address;
134};
135static struct khugepaged_scan khugepaged_scan = {
136 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
137};
138
139static DEFINE_SPINLOCK(split_queue_lock);
140static LIST_HEAD(split_queue);
141static unsigned long split_queue_len;
142static struct shrinker deferred_split_shrinker;
143
144static void set_recommended_min_free_kbytes(void)
145{
146 struct zone *zone;
147 int nr_zones = 0;
148 unsigned long recommended_min;
149
150 for_each_populated_zone(zone)
151 nr_zones++;
152
153 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
154 recommended_min = pageblock_nr_pages * nr_zones * 2;
155
156 /*
157 * Make sure that on average at least two pageblocks are almost free
158 * of another type, one for a migratetype to fall back to and a
159 * second to avoid subsequent fallbacks of other types. There are 3
160 * MIGRATE_TYPES we care about.
161 */
162 recommended_min += pageblock_nr_pages * nr_zones *
163 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
164
165 /* don't ever allow to reserve more than 5% of the lowmem */
166 recommended_min = min(recommended_min,
167 (unsigned long) nr_free_buffer_pages() / 20);
168 recommended_min <<= (PAGE_SHIFT-10);
169
170 if (recommended_min > min_free_kbytes) {
171 if (user_min_free_kbytes >= 0)
172 pr_info("raising min_free_kbytes from %d to %lu "
173 "to help transparent hugepage allocations\n",
174 min_free_kbytes, recommended_min);
175
176 min_free_kbytes = recommended_min;
177 }
178 setup_per_zone_wmarks();
179}
180
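/*
 * Editorial illustration (not part of this file): the arithmetic above can be
 * reproduced in plain C.  The values below are assumptions for a typical
 * x86-64 box (4 KiB pages, 2 MiB pageblocks, MIGRATE_PCPTYPES == 3, three
 * populated zones); the real numbers come from the running kernel.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pageblock_nr_pages = 512;		/* 2 MiB / 4 KiB */
	unsigned long nr_zones = 3;			/* e.g. DMA, DMA32, Normal */
	unsigned long migrate_pcptypes = 3;
	unsigned long free_buffer_pages = 4UL << 20;	/* ~16 GiB worth of 4 KiB pages */
	unsigned long recommended_min;

	/* two free pageblocks per zone ... */
	recommended_min = pageblock_nr_pages * nr_zones * 2;
	/* ... plus headroom against fallbacks of the other migrate types */
	recommended_min += pageblock_nr_pages * nr_zones *
			   migrate_pcptypes * migrate_pcptypes;
	/* never reserve more than 5% of lowmem, then convert pages to KiB */
	if (recommended_min > free_buffer_pages / 20)
		recommended_min = free_buffer_pages / 20;
	recommended_min <<= (12 - 10);			/* PAGE_SHIFT - 10 */

	printf("recommended min_free_kbytes: %lu\n", recommended_min);
	return 0;
}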
181static int start_stop_khugepaged(void)
182{
183 int err = 0;
184 if (khugepaged_enabled()) {
185 if (!khugepaged_thread)
186 khugepaged_thread = kthread_run(khugepaged, NULL,
187 "khugepaged");
188 if (IS_ERR(khugepaged_thread)) {
189 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
190 err = PTR_ERR(khugepaged_thread);
191 khugepaged_thread = NULL;
192 goto fail;
193 }
194
195 if (!list_empty(&khugepaged_scan.mm_head))
196 wake_up_interruptible(&khugepaged_wait);
197
198 set_recommended_min_free_kbytes();
199 } else if (khugepaged_thread) {
200 kthread_stop(khugepaged_thread);
201 khugepaged_thread = NULL;
202 }
203fail:
204 return err;
205}
206
207static atomic_t huge_zero_refcount;
208struct page *huge_zero_page __read_mostly;
209
210struct page *get_huge_zero_page(void)
211{
212 struct page *zero_page;
213retry:
214 if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
215 return READ_ONCE(huge_zero_page);
216
217 zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
218 HPAGE_PMD_ORDER);
219 if (!zero_page) {
220 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
221 return NULL;
222 }
223 count_vm_event(THP_ZERO_PAGE_ALLOC);
224 preempt_disable();
225 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
226 preempt_enable();
227 __free_pages(zero_page, compound_order(zero_page));
228 goto retry;
229 }
230
231 /* We take an additional reference here. It will be put back by the shrinker */
232 atomic_set(&huge_zero_refcount, 2);
233 preempt_enable();
234 return READ_ONCE(huge_zero_page);
235}
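/*
 * Editorial illustration (not part of this file): the lifetime scheme above --
 * take a reference with inc-not-zero, publish a single lazily allocated
 * instance with cmpxchg, and let a shrinker free it once only the cached
 * reference remains -- sketched with C11 atomics.  All names are invented.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) zero_buf;	/* plays the role of huge_zero_page */
static atomic_int zero_refcount;	/* plays the role of huge_zero_refcount */

static void *get_zero_buf(void)
{
	for (;;) {
		int ref = atomic_load(&zero_refcount);

		/* fast path: already allocated, just take a reference */
		while (ref > 0)
			if (atomic_compare_exchange_weak(&zero_refcount, &ref, ref + 1))
				return atomic_load(&zero_buf);

		/* slow path: allocate and try to install our copy */
		void *buf = calloc(1, 4096);
		void *expected = NULL;

		if (!buf)
			return NULL;
		if (!atomic_compare_exchange_strong(&zero_buf, &expected, buf)) {
			free(buf);	/* lost the race, retry the fast path */
			continue;
		}
		atomic_store(&zero_refcount, 2);	/* caller's ref + cached ref */
		return buf;
	}
}

static void put_zero_buf(void)
{
	atomic_fetch_sub(&zero_refcount, 1);
}

/* shrinker analogue: drop the cached instance once only its own ref remains */
static void shrink_zero_buf(void)
{
	int expected = 1;

	if (atomic_compare_exchange_strong(&zero_refcount, &expected, 0))
		free(atomic_exchange(&zero_buf, NULL));
}

int main(void)
{
	void *p = get_zero_buf();

	printf("zero buffer at %p\n", p);
	put_zero_buf();
	shrink_zero_buf();
	return 0;
}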
236
97ae1749 237static void put_huge_zero_page(void)
4a6c1297 238{
97ae1749
KS
239 /*
240 * Counter should never go to zero here. Only shrinker can put
241 * last reference.
242 */
243 BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
4a6c1297
KS
244}
245
48896466
GC
246static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
247 struct shrink_control *sc)
4a6c1297 248{
48896466
GC
249 /* we can free zero page only if last reference remains */
250 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
251}
97ae1749 252
48896466
GC
253static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
254 struct shrink_control *sc)
255{
97ae1749 256 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
5918d10a
KS
257 struct page *zero_page = xchg(&huge_zero_page, NULL);
258 BUG_ON(zero_page == NULL);
5ddacbe9 259 __free_pages(zero_page, compound_order(zero_page));
48896466 260 return HPAGE_PMD_NR;
97ae1749
KS
261 }
262
263 return 0;
4a6c1297
KS
264}
265
97ae1749 266static struct shrinker huge_zero_page_shrinker = {
48896466
GC
267 .count_objects = shrink_huge_zero_page_count,
268 .scan_objects = shrink_huge_zero_page_scan,
97ae1749
KS
269 .seeks = DEFAULT_SEEKS,
270};
271
71e3aac0 272#ifdef CONFIG_SYSFS
ba76149f 273
71e3aac0
AA
274static ssize_t double_flag_show(struct kobject *kobj,
275 struct kobj_attribute *attr, char *buf,
276 enum transparent_hugepage_flag enabled,
277 enum transparent_hugepage_flag req_madv)
278{
279 if (test_bit(enabled, &transparent_hugepage_flags)) {
280 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
281 return sprintf(buf, "[always] madvise never\n");
282 } else if (test_bit(req_madv, &transparent_hugepage_flags))
283 return sprintf(buf, "always [madvise] never\n");
284 else
285 return sprintf(buf, "always madvise [never]\n");
286}
287static ssize_t double_flag_store(struct kobject *kobj,
288 struct kobj_attribute *attr,
289 const char *buf, size_t count,
290 enum transparent_hugepage_flag enabled,
291 enum transparent_hugepage_flag req_madv)
292{
293 if (!memcmp("always", buf,
294 min(sizeof("always")-1, count))) {
295 set_bit(enabled, &transparent_hugepage_flags);
296 clear_bit(req_madv, &transparent_hugepage_flags);
297 } else if (!memcmp("madvise", buf,
298 min(sizeof("madvise")-1, count))) {
299 clear_bit(enabled, &transparent_hugepage_flags);
300 set_bit(req_madv, &transparent_hugepage_flags);
301 } else if (!memcmp("never", buf,
302 min(sizeof("never")-1, count))) {
303 clear_bit(enabled, &transparent_hugepage_flags);
304 clear_bit(req_madv, &transparent_hugepage_flags);
305 } else
306 return -EINVAL;
307
308 return count;
309}
310
311static ssize_t enabled_show(struct kobject *kobj,
312 struct kobj_attribute *attr, char *buf)
313{
314 return double_flag_show(kobj, attr, buf,
315 TRANSPARENT_HUGEPAGE_FLAG,
316 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
317}
318static ssize_t enabled_store(struct kobject *kobj,
319 struct kobj_attribute *attr,
320 const char *buf, size_t count)
321{
ba76149f
AA
322 ssize_t ret;
323
324 ret = double_flag_store(kobj, attr, buf, count,
325 TRANSPARENT_HUGEPAGE_FLAG,
326 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
327
328 if (ret > 0) {
911891af
XG
329 int err;
330
331 mutex_lock(&khugepaged_mutex);
79553da2 332 err = start_stop_khugepaged();
911891af
XG
333 mutex_unlock(&khugepaged_mutex);
334
ba76149f
AA
335 if (err)
336 ret = err;
337 }
338
339 return ret;
71e3aac0
AA
340}
341static struct kobj_attribute enabled_attr =
342 __ATTR(enabled, 0644, enabled_show, enabled_store);
343
344static ssize_t single_flag_show(struct kobject *kobj,
345 struct kobj_attribute *attr, char *buf,
346 enum transparent_hugepage_flag flag)
347{
e27e6151
BH
348 return sprintf(buf, "%d\n",
349 !!test_bit(flag, &transparent_hugepage_flags));
71e3aac0 350}
e27e6151 351
71e3aac0
AA
352static ssize_t single_flag_store(struct kobject *kobj,
353 struct kobj_attribute *attr,
354 const char *buf, size_t count,
355 enum transparent_hugepage_flag flag)
356{
e27e6151
BH
357 unsigned long value;
358 int ret;
359
360 ret = kstrtoul(buf, 10, &value);
361 if (ret < 0)
362 return ret;
363 if (value > 1)
364 return -EINVAL;
365
366 if (value)
71e3aac0 367 set_bit(flag, &transparent_hugepage_flags);
e27e6151 368 else
71e3aac0 369 clear_bit(flag, &transparent_hugepage_flags);
71e3aac0
AA
370
371 return count;
372}
373
374/*
375 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
376 * __GFP_REPEAT is too aggressive, it's never worth swapping tons of
377 * memory just to allocate one more hugepage.
378 */
379static ssize_t defrag_show(struct kobject *kobj,
380 struct kobj_attribute *attr, char *buf)
381{
382 return double_flag_show(kobj, attr, buf,
383 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
384 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
385}
386static ssize_t defrag_store(struct kobject *kobj,
387 struct kobj_attribute *attr,
388 const char *buf, size_t count)
389{
390 return double_flag_store(kobj, attr, buf, count,
391 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
392 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
393}
394static struct kobj_attribute defrag_attr =
395 __ATTR(defrag, 0644, defrag_show, defrag_store);
396
79da5407
KS
397static ssize_t use_zero_page_show(struct kobject *kobj,
398 struct kobj_attribute *attr, char *buf)
399{
400 return single_flag_show(kobj, attr, buf,
401 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
402}
403static ssize_t use_zero_page_store(struct kobject *kobj,
404 struct kobj_attribute *attr, const char *buf, size_t count)
405{
406 return single_flag_store(kobj, attr, buf, count,
407 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
408}
409static struct kobj_attribute use_zero_page_attr =
410 __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
71e3aac0
AA
411#ifdef CONFIG_DEBUG_VM
412static ssize_t debug_cow_show(struct kobject *kobj,
413 struct kobj_attribute *attr, char *buf)
414{
415 return single_flag_show(kobj, attr, buf,
416 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
417}
418static ssize_t debug_cow_store(struct kobject *kobj,
419 struct kobj_attribute *attr,
420 const char *buf, size_t count)
421{
422 return single_flag_store(kobj, attr, buf, count,
423 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
424}
425static struct kobj_attribute debug_cow_attr =
426 __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
427#endif /* CONFIG_DEBUG_VM */
428
429static struct attribute *hugepage_attr[] = {
430 &enabled_attr.attr,
431 &defrag_attr.attr,
79da5407 432 &use_zero_page_attr.attr,
71e3aac0
AA
433#ifdef CONFIG_DEBUG_VM
434 &debug_cow_attr.attr,
435#endif
436 NULL,
437};
438
439static struct attribute_group hugepage_attr_group = {
440 .attrs = hugepage_attr,
ba76149f
AA
441};
442
443static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
444 struct kobj_attribute *attr,
445 char *buf)
446{
447 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
448}
449
450static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
451 struct kobj_attribute *attr,
452 const char *buf, size_t count)
453{
454 unsigned long msecs;
455 int err;
456
3dbb95f7 457 err = kstrtoul(buf, 10, &msecs);
ba76149f
AA
458 if (err || msecs > UINT_MAX)
459 return -EINVAL;
460
461 khugepaged_scan_sleep_millisecs = msecs;
462 wake_up_interruptible(&khugepaged_wait);
463
464 return count;
465}
466static struct kobj_attribute scan_sleep_millisecs_attr =
467 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
468 scan_sleep_millisecs_store);
469
470static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
471 struct kobj_attribute *attr,
472 char *buf)
473{
474 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
475}
476
477static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
478 struct kobj_attribute *attr,
479 const char *buf, size_t count)
480{
481 unsigned long msecs;
482 int err;
483
3dbb95f7 484 err = kstrtoul(buf, 10, &msecs);
ba76149f
AA
485 if (err || msecs > UINT_MAX)
486 return -EINVAL;
487
488 khugepaged_alloc_sleep_millisecs = msecs;
489 wake_up_interruptible(&khugepaged_wait);
490
491 return count;
492}
493static struct kobj_attribute alloc_sleep_millisecs_attr =
494 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
495 alloc_sleep_millisecs_store);
496
497static ssize_t pages_to_scan_show(struct kobject *kobj,
498 struct kobj_attribute *attr,
499 char *buf)
500{
501 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
502}
503static ssize_t pages_to_scan_store(struct kobject *kobj,
504 struct kobj_attribute *attr,
505 const char *buf, size_t count)
506{
507 int err;
508 unsigned long pages;
509
3dbb95f7 510 err = kstrtoul(buf, 10, &pages);
ba76149f
AA
511 if (err || !pages || pages > UINT_MAX)
512 return -EINVAL;
513
514 khugepaged_pages_to_scan = pages;
515
516 return count;
517}
518static struct kobj_attribute pages_to_scan_attr =
519 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
520 pages_to_scan_store);
521
522static ssize_t pages_collapsed_show(struct kobject *kobj,
523 struct kobj_attribute *attr,
524 char *buf)
525{
526 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
527}
528static struct kobj_attribute pages_collapsed_attr =
529 __ATTR_RO(pages_collapsed);
530
531static ssize_t full_scans_show(struct kobject *kobj,
532 struct kobj_attribute *attr,
533 char *buf)
534{
535 return sprintf(buf, "%u\n", khugepaged_full_scans);
536}
537static struct kobj_attribute full_scans_attr =
538 __ATTR_RO(full_scans);
539
540static ssize_t khugepaged_defrag_show(struct kobject *kobj,
541 struct kobj_attribute *attr, char *buf)
542{
543 return single_flag_show(kobj, attr, buf,
544 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
545}
546static ssize_t khugepaged_defrag_store(struct kobject *kobj,
547 struct kobj_attribute *attr,
548 const char *buf, size_t count)
549{
550 return single_flag_store(kobj, attr, buf, count,
551 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
552}
553static struct kobj_attribute khugepaged_defrag_attr =
554 __ATTR(defrag, 0644, khugepaged_defrag_show,
555 khugepaged_defrag_store);
556
557/*
558 * max_ptes_none controls whether khugepaged should collapse hugepages over
559 * any unmapped ptes, in turn potentially increasing the memory
560 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
561 * reduce the available free memory in the system as it
562 * runs. Increasing max_ptes_none will instead potentially reduce the
563 * free memory in the system during the khugepaged scan.
564 */
565static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
566 struct kobj_attribute *attr,
567 char *buf)
568{
569 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
570}
571static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
572 struct kobj_attribute *attr,
573 const char *buf, size_t count)
574{
575 int err;
576 unsigned long max_ptes_none;
577
3dbb95f7 578 err = kstrtoul(buf, 10, &max_ptes_none);
ba76149f
AA
579 if (err || max_ptes_none > HPAGE_PMD_NR-1)
580 return -EINVAL;
581
582 khugepaged_max_ptes_none = max_ptes_none;
583
584 return count;
585}
586static struct kobj_attribute khugepaged_max_ptes_none_attr =
587 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
588 khugepaged_max_ptes_none_store);
589
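/*
 * Editorial illustration (not part of this file): the khugepaged attributes
 * above are exposed under /sys/kernel/mm/transparent_hugepage/khugepaged/.
 * A userspace sketch that tunes them (requires root; the values written are
 * arbitrary examples, not recommendations):
 */
#include <stdio.h>

static int set_khugepaged_knob(const char *name, unsigned long value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/mm/transparent_hugepage/khugepaged/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%lu\n", value);
	return fclose(f);
}

int main(void)
{
	if (set_khugepaged_knob("max_ptes_none", 64) ||
	    set_khugepaged_knob("pages_to_scan", 4096))
		perror("khugepaged sysfs");
	return 0;
}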
590static struct attribute *khugepaged_attr[] = {
591 &khugepaged_defrag_attr.attr,
592 &khugepaged_max_ptes_none_attr.attr,
593 &pages_to_scan_attr.attr,
594 &pages_collapsed_attr.attr,
595 &full_scans_attr.attr,
596 &scan_sleep_millisecs_attr.attr,
597 &alloc_sleep_millisecs_attr.attr,
598 NULL,
599};
600
601static struct attribute_group khugepaged_attr_group = {
602 .attrs = khugepaged_attr,
603 .name = "khugepaged",
71e3aac0 604};
71e3aac0 605
569e5590 606static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
71e3aac0 607{
71e3aac0
AA
608 int err;
609
569e5590
SL
610 *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
611 if (unlikely(!*hugepage_kobj)) {
ae3a8c1c 612 pr_err("failed to create transparent hugepage kobject\n");
569e5590 613 return -ENOMEM;
ba76149f
AA
614 }
615
569e5590 616 err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
ba76149f 617 if (err) {
ae3a8c1c 618 pr_err("failed to register transparent hugepage group\n");
569e5590 619 goto delete_obj;
ba76149f
AA
620 }
621
569e5590 622 err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
ba76149f 623 if (err) {
ae3a8c1c 624 pr_err("failed to register transparent hugepage group\n");
569e5590 625 goto remove_hp_group;
ba76149f 626 }
569e5590
SL
627
628 return 0;
629
630remove_hp_group:
631 sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
632delete_obj:
633 kobject_put(*hugepage_kobj);
634 return err;
635}
636
637static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
638{
639 sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
640 sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
641 kobject_put(hugepage_kobj);
642}
643#else
644static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
645{
646 return 0;
647}
648
649static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
650{
651}
652#endif /* CONFIG_SYSFS */
653
654static int __init hugepage_init(void)
655{
656 int err;
657 struct kobject *hugepage_kobj;
658
659 if (!has_transparent_hugepage()) {
660 transparent_hugepage_flags = 0;
661 return -EINVAL;
662 }
663
664 err = hugepage_init_sysfs(&hugepage_kobj);
665 if (err)
65ebb64f 666 goto err_sysfs;
ba76149f
AA
667
668 err = khugepaged_slab_init();
669 if (err)
65ebb64f 670 goto err_slab;
ba76149f 671
65ebb64f
KS
672 err = register_shrinker(&huge_zero_page_shrinker);
673 if (err)
674 goto err_hzp_shrinker;
9a982250
KS
675 err = register_shrinker(&deferred_split_shrinker);
676 if (err)
677 goto err_split_shrinker;
97ae1749 678
97562cd2
RR
679 /*
680 * By default disable transparent hugepages on smaller systems,
681 * where the extra memory used could hurt more than TLB overhead
682 * is likely to save. The admin can still enable it through /sys.
683 */
79553da2 684 if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
97562cd2 685 transparent_hugepage_flags = 0;
79553da2
KS
686 return 0;
687 }
97562cd2 688
79553da2 689 err = start_stop_khugepaged();
65ebb64f
KS
690 if (err)
691 goto err_khugepaged;
ba76149f 692
569e5590 693 return 0;
65ebb64f 694err_khugepaged:
9a982250
KS
695 unregister_shrinker(&deferred_split_shrinker);
696err_split_shrinker:
65ebb64f
KS
697 unregister_shrinker(&huge_zero_page_shrinker);
698err_hzp_shrinker:
699 khugepaged_slab_exit();
700err_slab:
569e5590 701 hugepage_exit_sysfs(hugepage_kobj);
65ebb64f 702err_sysfs:
ba76149f 703 return err;
71e3aac0 704}
a64fb3cd 705subsys_initcall(hugepage_init);
71e3aac0
AA
706
707static int __init setup_transparent_hugepage(char *str)
708{
709 int ret = 0;
710 if (!str)
711 goto out;
712 if (!strcmp(str, "always")) {
713 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
714 &transparent_hugepage_flags);
715 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
716 &transparent_hugepage_flags);
717 ret = 1;
718 } else if (!strcmp(str, "madvise")) {
719 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
720 &transparent_hugepage_flags);
721 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
722 &transparent_hugepage_flags);
723 ret = 1;
724 } else if (!strcmp(str, "never")) {
725 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
726 &transparent_hugepage_flags);
727 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
728 &transparent_hugepage_flags);
729 ret = 1;
730 }
731out:
732 if (!ret)
ae3a8c1c 733 pr_warn("transparent_hugepage= cannot parse, ignored\n");
71e3aac0
AA
734 return ret;
735}
736__setup("transparent_hugepage=", setup_transparent_hugepage);
737
b32967ff 738pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
71e3aac0
AA
739{
740 if (likely(vma->vm_flags & VM_WRITE))
741 pmd = pmd_mkwrite(pmd);
742 return pmd;
743}
744
3122359a 745static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
b3092b3b
BL
746{
747 pmd_t entry;
3122359a 748 entry = mk_pmd(page, prot);
b3092b3b
BL
749 entry = pmd_mkhuge(entry);
750 return entry;
751}
752
753static inline struct list_head *page_deferred_list(struct page *page)
754{
755 /*
756 * ->lru in the tail pages is occupied by compound_head.
757 * Let's use ->mapping + ->index in the second tail page as list_head.
758 */
759 return (struct list_head *)&page[2].mapping;
760}
761
762void prep_transhuge_page(struct page *page)
763{
764 /*
765 * we use page->mapping and page->index in the second tail page
766 * as a list_head: assuming THP order >= 2
767 */
768 BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);
769
770 INIT_LIST_HEAD(page_deferred_list(page));
771 set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
772}
773
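/*
 * Editorial illustration (not part of this file): ->mapping and ->index are
 * two adjacent pointer-sized fields, so together they can hold the two
 * pointers of a list_head.  A mock userspace layout (not the real struct
 * page) showing the same aliasing idea:
 */
#include <stdio.h>
#include <stddef.h>

struct mock_list_head { struct mock_list_head *next, *prev; };

struct mock_page {
	unsigned long flags;
	void *mapping;
	unsigned long index;	/* sits right after ->mapping */
};

int main(void)
{
	struct mock_page pages[4] = { 0 };
	/* the deferred list head lives in ->mapping + ->index of the 2nd tail page */
	struct mock_list_head *lh = (struct mock_list_head *)&pages[2].mapping;

	lh->next = lh->prev = lh;	/* INIT_LIST_HEAD() equivalent */
	printf("list head stored at offset %zu within page[2]\n",
	       offsetof(struct mock_page, mapping));
	return 0;
}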
71e3aac0
AA
774static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
775 struct vm_area_struct *vma,
230c92a8 776 unsigned long address, pmd_t *pmd,
6b251fc9
AA
777 struct page *page, gfp_t gfp,
778 unsigned int flags)
71e3aac0 779{
00501b53 780 struct mem_cgroup *memcg;
71e3aac0 781 pgtable_t pgtable;
c4088ebd 782 spinlock_t *ptl;
230c92a8 783 unsigned long haddr = address & HPAGE_PMD_MASK;
71e3aac0 784
309381fe 785 VM_BUG_ON_PAGE(!PageCompound(page), page);
00501b53 786
f627c2f5 787 if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
6b251fc9
AA
788 put_page(page);
789 count_vm_event(THP_FAULT_FALLBACK);
790 return VM_FAULT_FALLBACK;
791 }
00501b53 792
71e3aac0 793 pgtable = pte_alloc_one(mm, haddr);
00501b53 794 if (unlikely(!pgtable)) {
f627c2f5 795 mem_cgroup_cancel_charge(page, memcg, true);
6b251fc9 796 put_page(page);
71e3aac0 797 return VM_FAULT_OOM;
00501b53 798 }
71e3aac0
AA
799
800 clear_huge_page(page, haddr, HPAGE_PMD_NR);
52f37629
MK
801 /*
802 * The memory barrier inside __SetPageUptodate makes sure that
803 * clear_huge_page writes become visible before the set_pmd_at()
804 * write.
805 */
71e3aac0
AA
806 __SetPageUptodate(page);
807
c4088ebd 808 ptl = pmd_lock(mm, pmd);
71e3aac0 809 if (unlikely(!pmd_none(*pmd))) {
c4088ebd 810 spin_unlock(ptl);
f627c2f5 811 mem_cgroup_cancel_charge(page, memcg, true);
71e3aac0
AA
812 put_page(page);
813 pte_free(mm, pgtable);
814 } else {
815 pmd_t entry;
6b251fc9
AA
816
817 /* Deliver the page fault to userland */
818 if (userfaultfd_missing(vma)) {
819 int ret;
820
821 spin_unlock(ptl);
f627c2f5 822 mem_cgroup_cancel_charge(page, memcg, true);
6b251fc9
AA
823 put_page(page);
824 pte_free(mm, pgtable);
230c92a8 825 ret = handle_userfault(vma, address, flags,
6b251fc9
AA
826 VM_UFFD_MISSING);
827 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
828 return ret;
829 }
830
3122359a
KS
831 entry = mk_huge_pmd(page, vma->vm_page_prot);
832 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
d281ee61 833 page_add_new_anon_rmap(page, vma, haddr, true);
f627c2f5 834 mem_cgroup_commit_charge(page, memcg, false, true);
00501b53 835 lru_cache_add_active_or_unevictable(page, vma);
6b0b50b0 836 pgtable_trans_huge_deposit(mm, pmd, pgtable);
71e3aac0 837 set_pmd_at(mm, haddr, pmd, entry);
71e3aac0 838 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
e1f56c89 839 atomic_long_inc(&mm->nr_ptes);
c4088ebd 840 spin_unlock(ptl);
6b251fc9 841 count_vm_event(THP_FAULT_ALLOC);
71e3aac0
AA
842 }
843
aa2e878e 844 return 0;
71e3aac0
AA
845}
846
cc5d462f 847static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
0bbbc0b3 848{
71baba4b 849 return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
0bbbc0b3
AA
850}
851
c4088ebd 852/* Caller must hold page table lock. */
d295e341 853static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
97ae1749 854 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
5918d10a 855 struct page *zero_page)
fc9fe822
KS
856{
857 pmd_t entry;
7c414164
AM
858 if (!pmd_none(*pmd))
859 return false;
5918d10a 860 entry = mk_pmd(zero_page, vma->vm_page_prot);
fc9fe822 861 entry = pmd_mkhuge(entry);
6b0b50b0 862 pgtable_trans_huge_deposit(mm, pmd, pgtable);
fc9fe822 863 set_pmd_at(mm, haddr, pmd, entry);
e1f56c89 864 atomic_long_inc(&mm->nr_ptes);
7c414164 865 return true;
fc9fe822
KS
866}
867
71e3aac0
AA
868int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
869 unsigned long address, pmd_t *pmd,
870 unsigned int flags)
871{
077fcf11 872 gfp_t gfp;
71e3aac0
AA
873 struct page *page;
874 unsigned long haddr = address & HPAGE_PMD_MASK;
71e3aac0 875
128ec037 876 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
c0292554 877 return VM_FAULT_FALLBACK;
128ec037
KS
878 if (unlikely(anon_vma_prepare(vma)))
879 return VM_FAULT_OOM;
6d50e60c 880 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
128ec037 881 return VM_FAULT_OOM;
593befa6 882 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
128ec037 883 transparent_hugepage_use_zero_page()) {
c4088ebd 884 spinlock_t *ptl;
128ec037
KS
885 pgtable_t pgtable;
886 struct page *zero_page;
887 bool set;
6b251fc9 888 int ret;
128ec037
KS
889 pgtable = pte_alloc_one(mm, haddr);
890 if (unlikely(!pgtable))
ba76149f 891 return VM_FAULT_OOM;
128ec037
KS
892 zero_page = get_huge_zero_page();
893 if (unlikely(!zero_page)) {
894 pte_free(mm, pgtable);
81ab4201 895 count_vm_event(THP_FAULT_FALLBACK);
c0292554 896 return VM_FAULT_FALLBACK;
b9bbfbe3 897 }
c4088ebd 898 ptl = pmd_lock(mm, pmd);
6b251fc9
AA
899 ret = 0;
900 set = false;
901 if (pmd_none(*pmd)) {
902 if (userfaultfd_missing(vma)) {
903 spin_unlock(ptl);
230c92a8 904 ret = handle_userfault(vma, address, flags,
6b251fc9
AA
905 VM_UFFD_MISSING);
906 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
907 } else {
908 set_huge_zero_page(pgtable, mm, vma,
909 haddr, pmd,
910 zero_page);
911 spin_unlock(ptl);
912 set = true;
913 }
914 } else
915 spin_unlock(ptl);
128ec037
KS
916 if (!set) {
917 pte_free(mm, pgtable);
918 put_huge_zero_page();
edad9d2c 919 }
6b251fc9 920 return ret;
71e3aac0 921 }
077fcf11
AK
922 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
923 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
128ec037
KS
924 if (unlikely(!page)) {
925 count_vm_event(THP_FAULT_FALLBACK);
c0292554 926 return VM_FAULT_FALLBACK;
128ec037 927 }
9a982250 928 prep_transhuge_page(page);
230c92a8
AA
929 return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
930 flags);
931}
932
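/*
 * Editorial illustration (not part of this file): the fault path above is
 * what ends up backing an ordinary anonymous mapping with a huge page.  A
 * small userspace test, assuming HPAGE_PMD_SIZE is 2 MiB and THP is enabled
 * (system-wide or via the madvise below):
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)	/* assumed HPAGE_PMD_SIZE */

int main(void)
{
	size_t len = 4 * HPAGE_SIZE;	/* over-allocate to find an aligned spot */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *aligned;
	char line[256];
	FILE *f;

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	aligned = (char *)(((unsigned long)p + HPAGE_SIZE - 1) & ~(HPAGE_SIZE - 1));
	madvise(aligned, 2 * HPAGE_SIZE, MADV_HUGEPAGE);
	memset(aligned, 0x5a, 2 * HPAGE_SIZE);	/* first write takes the huge fault */

	/* non-zero AnonHugePages in smaps means the mapping got THP backing */
	f = fopen("/proc/self/smaps", "r");
	while (f && fgets(line, sizeof(line), f))
		if (strstr(line, "AnonHugePages:"))
			fputs(line, stdout);
	if (f)
		fclose(f);
	return 0;
}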
ae18d6dc 933static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
5cad465d
MW
934 pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
935{
936 struct mm_struct *mm = vma->vm_mm;
937 pmd_t entry;
938 spinlock_t *ptl;
939
940 ptl = pmd_lock(mm, pmd);
941 if (pmd_none(*pmd)) {
942 entry = pmd_mkhuge(pfn_pmd(pfn, prot));
943 if (write) {
944 entry = pmd_mkyoung(pmd_mkdirty(entry));
945 entry = maybe_pmd_mkwrite(entry, vma);
946 }
947 set_pmd_at(mm, addr, pmd, entry);
948 update_mmu_cache_pmd(vma, addr, pmd);
949 }
950 spin_unlock(ptl);
5cad465d
MW
951}
952
953int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
954 pmd_t *pmd, unsigned long pfn, bool write)
955{
956 pgprot_t pgprot = vma->vm_page_prot;
957 /*
958 * If we had pmd_special, we could avoid all these restrictions,
959 * but we need to be consistent with PTEs and architectures that
960 * can't support a 'special' bit.
961 */
962 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
963 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
964 (VM_PFNMAP|VM_MIXEDMAP));
965 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
966 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
967
968 if (addr < vma->vm_start || addr >= vma->vm_end)
969 return VM_FAULT_SIGBUS;
970 if (track_pfn_insert(vma, &pgprot, pfn))
971 return VM_FAULT_SIGBUS;
ae18d6dc
MW
972 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
973 return VM_FAULT_NOPAGE;
5cad465d
MW
974}
975
71e3aac0
AA
976int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
977 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
978 struct vm_area_struct *vma)
979{
c4088ebd 980 spinlock_t *dst_ptl, *src_ptl;
71e3aac0
AA
981 struct page *src_page;
982 pmd_t pmd;
983 pgtable_t pgtable;
984 int ret;
985
986 ret = -ENOMEM;
987 pgtable = pte_alloc_one(dst_mm, addr);
988 if (unlikely(!pgtable))
989 goto out;
990
c4088ebd
KS
991 dst_ptl = pmd_lock(dst_mm, dst_pmd);
992 src_ptl = pmd_lockptr(src_mm, src_pmd);
993 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
71e3aac0
AA
994
995 ret = -EAGAIN;
996 pmd = *src_pmd;
997 if (unlikely(!pmd_trans_huge(pmd))) {
998 pte_free(dst_mm, pgtable);
999 goto out_unlock;
1000 }
fc9fe822 1001 /*
c4088ebd 1002 * When page table lock is held, the huge zero pmd should not be
fc9fe822
KS
1003 * under splitting since we don't split the page itself, only pmd to
1004 * a page table.
1005 */
1006 if (is_huge_zero_pmd(pmd)) {
5918d10a 1007 struct page *zero_page;
97ae1749
KS
1008 /*
1009 * get_huge_zero_page() will never allocate a new page here,
1010 * since we already have a zero page to copy. It just takes a
1011 * reference.
1012 */
5918d10a 1013 zero_page = get_huge_zero_page();
6b251fc9 1014 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
5918d10a 1015 zero_page);
fc9fe822
KS
1016 ret = 0;
1017 goto out_unlock;
1018 }
de466bd6 1019
71e3aac0 1020 src_page = pmd_page(pmd);
309381fe 1021 VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
71e3aac0 1022 get_page(src_page);
53f9263b 1023 page_dup_rmap(src_page, true);
71e3aac0
AA
1024 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
1025
1026 pmdp_set_wrprotect(src_mm, addr, src_pmd);
1027 pmd = pmd_mkold(pmd_wrprotect(pmd));
6b0b50b0 1028 pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
71e3aac0 1029 set_pmd_at(dst_mm, addr, dst_pmd, pmd);
e1f56c89 1030 atomic_long_inc(&dst_mm->nr_ptes);
71e3aac0
AA
1031
1032 ret = 0;
1033out_unlock:
c4088ebd
KS
1034 spin_unlock(src_ptl);
1035 spin_unlock(dst_ptl);
71e3aac0
AA
1036out:
1037 return ret;
1038}
1039
a1dd450b
WD
1040void huge_pmd_set_accessed(struct mm_struct *mm,
1041 struct vm_area_struct *vma,
1042 unsigned long address,
1043 pmd_t *pmd, pmd_t orig_pmd,
1044 int dirty)
1045{
c4088ebd 1046 spinlock_t *ptl;
a1dd450b
WD
1047 pmd_t entry;
1048 unsigned long haddr;
1049
c4088ebd 1050 ptl = pmd_lock(mm, pmd);
a1dd450b
WD
1051 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1052 goto unlock;
1053
1054 entry = pmd_mkyoung(orig_pmd);
1055 haddr = address & HPAGE_PMD_MASK;
1056 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
1057 update_mmu_cache_pmd(vma, address, pmd);
1058
1059unlock:
c4088ebd 1060 spin_unlock(ptl);
a1dd450b
WD
1061}
1062
71e3aac0
AA
1063static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1064 struct vm_area_struct *vma,
1065 unsigned long address,
1066 pmd_t *pmd, pmd_t orig_pmd,
1067 struct page *page,
1068 unsigned long haddr)
1069{
00501b53 1070 struct mem_cgroup *memcg;
c4088ebd 1071 spinlock_t *ptl;
71e3aac0
AA
1072 pgtable_t pgtable;
1073 pmd_t _pmd;
1074 int ret = 0, i;
1075 struct page **pages;
2ec74c3e
SG
1076 unsigned long mmun_start; /* For mmu_notifiers */
1077 unsigned long mmun_end; /* For mmu_notifiers */
71e3aac0
AA
1078
1079 pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1080 GFP_KERNEL);
1081 if (unlikely(!pages)) {
1082 ret |= VM_FAULT_OOM;
1083 goto out;
1084 }
1085
1086 for (i = 0; i < HPAGE_PMD_NR; i++) {
cc5d462f
AK
1087 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1088 __GFP_OTHER_NODE,
19ee151e 1089 vma, address, page_to_nid(page));
b9bbfbe3 1090 if (unlikely(!pages[i] ||
00501b53 1091 mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
f627c2f5 1092 &memcg, false))) {
b9bbfbe3 1093 if (pages[i])
71e3aac0 1094 put_page(pages[i]);
b9bbfbe3 1095 while (--i >= 0) {
00501b53
JW
1096 memcg = (void *)page_private(pages[i]);
1097 set_page_private(pages[i], 0);
f627c2f5
KS
1098 mem_cgroup_cancel_charge(pages[i], memcg,
1099 false);
b9bbfbe3
AA
1100 put_page(pages[i]);
1101 }
71e3aac0
AA
1102 kfree(pages);
1103 ret |= VM_FAULT_OOM;
1104 goto out;
1105 }
00501b53 1106 set_page_private(pages[i], (unsigned long)memcg);
71e3aac0
AA
1107 }
1108
1109 for (i = 0; i < HPAGE_PMD_NR; i++) {
1110 copy_user_highpage(pages[i], page + i,
0089e485 1111 haddr + PAGE_SIZE * i, vma);
71e3aac0
AA
1112 __SetPageUptodate(pages[i]);
1113 cond_resched();
1114 }
1115
2ec74c3e
SG
1116 mmun_start = haddr;
1117 mmun_end = haddr + HPAGE_PMD_SIZE;
1118 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1119
c4088ebd 1120 ptl = pmd_lock(mm, pmd);
71e3aac0
AA
1121 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1122 goto out_free_pages;
309381fe 1123 VM_BUG_ON_PAGE(!PageHead(page), page);
71e3aac0 1124
8809aa2d 1125 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
71e3aac0
AA
1126 /* leave pmd empty until pte is filled */
1127
6b0b50b0 1128 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
71e3aac0
AA
1129 pmd_populate(mm, &_pmd, pgtable);
1130
1131 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1132 pte_t *pte, entry;
1133 entry = mk_pte(pages[i], vma->vm_page_prot);
1134 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
00501b53
JW
1135 memcg = (void *)page_private(pages[i]);
1136 set_page_private(pages[i], 0);
d281ee61 1137 page_add_new_anon_rmap(pages[i], vma, haddr, false);
f627c2f5 1138 mem_cgroup_commit_charge(pages[i], memcg, false, false);
00501b53 1139 lru_cache_add_active_or_unevictable(pages[i], vma);
71e3aac0
AA
1140 pte = pte_offset_map(&_pmd, haddr);
1141 VM_BUG_ON(!pte_none(*pte));
1142 set_pte_at(mm, haddr, pte, entry);
1143 pte_unmap(pte);
1144 }
1145 kfree(pages);
1146
71e3aac0
AA
1147 smp_wmb(); /* make pte visible before pmd */
1148 pmd_populate(mm, pmd, pgtable);
d281ee61 1149 page_remove_rmap(page, true);
c4088ebd 1150 spin_unlock(ptl);
71e3aac0 1151
2ec74c3e
SG
1152 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1153
71e3aac0
AA
1154 ret |= VM_FAULT_WRITE;
1155 put_page(page);
1156
1157out:
1158 return ret;
1159
1160out_free_pages:
c4088ebd 1161 spin_unlock(ptl);
2ec74c3e 1162 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
b9bbfbe3 1163 for (i = 0; i < HPAGE_PMD_NR; i++) {
00501b53
JW
1164 memcg = (void *)page_private(pages[i]);
1165 set_page_private(pages[i], 0);
f627c2f5 1166 mem_cgroup_cancel_charge(pages[i], memcg, false);
71e3aac0 1167 put_page(pages[i]);
b9bbfbe3 1168 }
71e3aac0
AA
1169 kfree(pages);
1170 goto out;
1171}
1172
1173int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1174 unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1175{
c4088ebd 1176 spinlock_t *ptl;
71e3aac0 1177 int ret = 0;
93b4796d 1178 struct page *page = NULL, *new_page;
00501b53 1179 struct mem_cgroup *memcg;
71e3aac0 1180 unsigned long haddr;
2ec74c3e
SG
1181 unsigned long mmun_start; /* For mmu_notifiers */
1182 unsigned long mmun_end; /* For mmu_notifiers */
3b363692 1183 gfp_t huge_gfp; /* for allocation and charge */
71e3aac0 1184
c4088ebd 1185 ptl = pmd_lockptr(mm, pmd);
81d1b09c 1186 VM_BUG_ON_VMA(!vma->anon_vma, vma);
93b4796d
KS
1187 haddr = address & HPAGE_PMD_MASK;
1188 if (is_huge_zero_pmd(orig_pmd))
1189 goto alloc;
c4088ebd 1190 spin_lock(ptl);
71e3aac0
AA
1191 if (unlikely(!pmd_same(*pmd, orig_pmd)))
1192 goto out_unlock;
1193
1194 page = pmd_page(orig_pmd);
309381fe 1195 VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1196 /*
1197 * We can only reuse the page if nobody else maps the huge page or its
1198 * part. We could do that by checking page_mapcount() on each sub-page, but
1199 * that is expensive.
1200 * The cheaper way is to check that page_count() is equal to 1: every
1201 * mapcount takes a page reference, so this way we can
1202 * guarantee that the PMD is the only mapping.
1203 * This can give a false negative if somebody pinned the page, but that's
1204 * fine.
1205 */
1206 if (page_mapcount(page) == 1 && page_count(page) == 1) {
71e3aac0
AA
1207 pmd_t entry;
1208 entry = pmd_mkyoung(orig_pmd);
1209 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1210 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
b113da65 1211 update_mmu_cache_pmd(vma, address, pmd);
71e3aac0
AA
1212 ret |= VM_FAULT_WRITE;
1213 goto out_unlock;
1214 }
ddc58f27 1215 get_page(page);
c4088ebd 1216 spin_unlock(ptl);
93b4796d 1217alloc:
71e3aac0 1218 if (transparent_hugepage_enabled(vma) &&
077fcf11 1219 !transparent_hugepage_debug_cow()) {
3b363692
MH
1220 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1221 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
077fcf11 1222 } else
71e3aac0
AA
1223 new_page = NULL;
1224
9a982250
KS
1225 if (likely(new_page)) {
1226 prep_transhuge_page(new_page);
1227 } else {
eecc1e42 1228 if (!page) {
78ddc534 1229 split_huge_pmd(vma, pmd, address);
e9b71ca9 1230 ret |= VM_FAULT_FALLBACK;
93b4796d
KS
1231 } else {
1232 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1233 pmd, orig_pmd, page, haddr);
9845cbbd 1234 if (ret & VM_FAULT_OOM) {
78ddc534 1235 split_huge_pmd(vma, pmd, address);
9845cbbd
KS
1236 ret |= VM_FAULT_FALLBACK;
1237 }
ddc58f27 1238 put_page(page);
93b4796d 1239 }
17766dde 1240 count_vm_event(THP_FAULT_FALLBACK);
71e3aac0
AA
1241 goto out;
1242 }
1243
f627c2f5
KS
1244 if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg,
1245 true))) {
b9bbfbe3 1246 put_page(new_page);
93b4796d 1247 if (page) {
78ddc534 1248 split_huge_pmd(vma, pmd, address);
ddc58f27 1249 put_page(page);
9845cbbd 1250 } else
78ddc534 1251 split_huge_pmd(vma, pmd, address);
9845cbbd 1252 ret |= VM_FAULT_FALLBACK;
17766dde 1253 count_vm_event(THP_FAULT_FALLBACK);
b9bbfbe3
AA
1254 goto out;
1255 }
1256
17766dde
DR
1257 count_vm_event(THP_FAULT_ALLOC);
1258
eecc1e42 1259 if (!page)
93b4796d
KS
1260 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1261 else
1262 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
71e3aac0
AA
1263 __SetPageUptodate(new_page);
1264
2ec74c3e
SG
1265 mmun_start = haddr;
1266 mmun_end = haddr + HPAGE_PMD_SIZE;
1267 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1268
c4088ebd 1269 spin_lock(ptl);
93b4796d 1270 if (page)
ddc58f27 1271 put_page(page);
b9bbfbe3 1272 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
c4088ebd 1273 spin_unlock(ptl);
f627c2f5 1274 mem_cgroup_cancel_charge(new_page, memcg, true);
71e3aac0 1275 put_page(new_page);
2ec74c3e 1276 goto out_mn;
b9bbfbe3 1277 } else {
71e3aac0 1278 pmd_t entry;
3122359a
KS
1279 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1280 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
8809aa2d 1281 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
d281ee61 1282 page_add_new_anon_rmap(new_page, vma, haddr, true);
f627c2f5 1283 mem_cgroup_commit_charge(new_page, memcg, false, true);
00501b53 1284 lru_cache_add_active_or_unevictable(new_page, vma);
71e3aac0 1285 set_pmd_at(mm, haddr, pmd, entry);
b113da65 1286 update_mmu_cache_pmd(vma, address, pmd);
eecc1e42 1287 if (!page) {
93b4796d 1288 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
97ae1749
KS
1289 put_huge_zero_page();
1290 } else {
309381fe 1291 VM_BUG_ON_PAGE(!PageHead(page), page);
d281ee61 1292 page_remove_rmap(page, true);
93b4796d
KS
1293 put_page(page);
1294 }
71e3aac0
AA
1295 ret |= VM_FAULT_WRITE;
1296 }
c4088ebd 1297 spin_unlock(ptl);
2ec74c3e
SG
1298out_mn:
1299 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
71e3aac0
AA
1300out:
1301 return ret;
2ec74c3e 1302out_unlock:
c4088ebd 1303 spin_unlock(ptl);
2ec74c3e 1304 return ret;
71e3aac0
AA
1305}
1306
b676b293 1307struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
71e3aac0
AA
1308 unsigned long addr,
1309 pmd_t *pmd,
1310 unsigned int flags)
1311{
b676b293 1312 struct mm_struct *mm = vma->vm_mm;
71e3aac0
AA
1313 struct page *page = NULL;
1314
c4088ebd 1315 assert_spin_locked(pmd_lockptr(mm, pmd));
71e3aac0
AA
1316
1317 if (flags & FOLL_WRITE && !pmd_write(*pmd))
1318 goto out;
1319
85facf25
KS
1320 /* Avoid dumping huge zero page */
1321 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1322 return ERR_PTR(-EFAULT);
1323
2b4847e7 1324 /* Full NUMA hinting faults to serialise migration in fault paths */
8a0516ed 1325 if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
2b4847e7
MG
1326 goto out;
1327
71e3aac0 1328 page = pmd_page(*pmd);
309381fe 1329 VM_BUG_ON_PAGE(!PageHead(page), page);
71e3aac0
AA
1330 if (flags & FOLL_TOUCH) {
1331 pmd_t _pmd;
1332 /*
1333 * We should set the dirty bit only for FOLL_WRITE but
1334 * for now the dirty bit in the pmd is meaningless.
1335 * And if the dirty bit will become meaningful and
1336 * we'll only set it with FOLL_WRITE, an atomic
1337 * set_bit will be required on the pmd to set the
1338 * young bit, instead of the current set_pmd_at.
1339 */
1340 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
8663890a
AK
1341 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1342 pmd, _pmd, 1))
1343 update_mmu_cache_pmd(vma, addr, pmd);
71e3aac0 1344 }
de60f5f1 1345 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
e90309c9
KS
1346 /*
1347 * We don't mlock() pte-mapped THPs. This way we can avoid
1348 * leaking mlocked pages into non-VM_LOCKED VMAs.
1349 *
1350 * In most cases the pmd is the only mapping of the page as we
1351 * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for
1352 * writable private mappings in populate_vma_page_range().
1353 *
1354 * The only scenario where we have the page shared here is if we are
1355 * mlocking a read-only mapping shared over fork(). We skip
1356 * mlocking such pages.
1357 */
1358 if (compound_mapcount(page) == 1 && !PageDoubleMap(page) &&
1359 page->mapping && trylock_page(page)) {
b676b293
DR
1360 lru_add_drain();
1361 if (page->mapping)
1362 mlock_vma_page(page);
1363 unlock_page(page);
1364 }
1365 }
71e3aac0 1366 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
309381fe 1367 VM_BUG_ON_PAGE(!PageCompound(page), page);
71e3aac0 1368 if (flags & FOLL_GET)
ddc58f27 1369 get_page(page);
71e3aac0
AA
1370
1371out:
1372 return page;
1373}
1374
d10e63f2 1375/* NUMA hinting page fault entry point for trans huge pmds */
4daae3b4
MG
1376int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1377 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
d10e63f2 1378{
c4088ebd 1379 spinlock_t *ptl;
b8916634 1380 struct anon_vma *anon_vma = NULL;
b32967ff 1381 struct page *page;
d10e63f2 1382 unsigned long haddr = addr & HPAGE_PMD_MASK;
8191acbd 1383 int page_nid = -1, this_nid = numa_node_id();
90572890 1384 int target_nid, last_cpupid = -1;
8191acbd
MG
1385 bool page_locked;
1386 bool migrated = false;
b191f9b1 1387 bool was_writable;
6688cc05 1388 int flags = 0;
d10e63f2 1389
c0e7cad9
MG
1390 /* A PROT_NONE fault should not end up here */
1391 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1392
c4088ebd 1393 ptl = pmd_lock(mm, pmdp);
d10e63f2
MG
1394 if (unlikely(!pmd_same(pmd, *pmdp)))
1395 goto out_unlock;
1396
de466bd6
MG
1397 /*
1398 * If there are potential migrations, wait for completion and retry
1399 * without disrupting NUMA hinting information. Do not relock and
1400 * check_same as the page may no longer be mapped.
1401 */
1402 if (unlikely(pmd_trans_migrating(*pmdp))) {
5d833062 1403 page = pmd_page(*pmdp);
de466bd6 1404 spin_unlock(ptl);
5d833062 1405 wait_on_page_locked(page);
de466bd6
MG
1406 goto out;
1407 }
1408
d10e63f2 1409 page = pmd_page(pmd);
a1a46184 1410 BUG_ON(is_huge_zero_page(page));
8191acbd 1411 page_nid = page_to_nid(page);
90572890 1412 last_cpupid = page_cpupid_last(page);
03c5a6e1 1413 count_vm_numa_event(NUMA_HINT_FAULTS);
04bb2f94 1414 if (page_nid == this_nid) {
03c5a6e1 1415 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
04bb2f94
RR
1416 flags |= TNF_FAULT_LOCAL;
1417 }
4daae3b4 1418
bea66fbd
MG
1419 /* See similar comment in do_numa_page for explanation */
1420 if (!(vma->vm_flags & VM_WRITE))
6688cc05
PZ
1421 flags |= TNF_NO_GROUP;
1422
ff9042b1
MG
1423 /*
1424 * Acquire the page lock to serialise THP migrations but avoid dropping
1425 * page_table_lock if at all possible
1426 */
b8916634
MG
1427 page_locked = trylock_page(page);
1428 target_nid = mpol_misplaced(page, vma, haddr);
1429 if (target_nid == -1) {
1430 /* If the page was locked, there are no parallel migrations */
a54a407f 1431 if (page_locked)
b8916634 1432 goto clear_pmdnuma;
2b4847e7 1433 }
4daae3b4 1434
de466bd6 1435 /* Migration could have started since the pmd_trans_migrating check */
2b4847e7 1436 if (!page_locked) {
c4088ebd 1437 spin_unlock(ptl);
b8916634 1438 wait_on_page_locked(page);
a54a407f 1439 page_nid = -1;
b8916634
MG
1440 goto out;
1441 }
1442
2b4847e7
MG
1443 /*
1444 * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1445 * to serialise splits
1446 */
b8916634 1447 get_page(page);
c4088ebd 1448 spin_unlock(ptl);
b8916634 1449 anon_vma = page_lock_anon_vma_read(page);
4daae3b4 1450
c69307d5 1451 /* Confirm the PMD did not change while page_table_lock was released */
c4088ebd 1452 spin_lock(ptl);
b32967ff
MG
1453 if (unlikely(!pmd_same(pmd, *pmdp))) {
1454 unlock_page(page);
1455 put_page(page);
a54a407f 1456 page_nid = -1;
4daae3b4 1457 goto out_unlock;
b32967ff 1458 }
ff9042b1 1459
c3a489ca
MG
1460 /* Bail if we fail to protect against THP splits for any reason */
1461 if (unlikely(!anon_vma)) {
1462 put_page(page);
1463 page_nid = -1;
1464 goto clear_pmdnuma;
1465 }
1466
a54a407f
MG
1467 /*
1468 * Migrate the THP to the requested node, returns with page unlocked
8a0516ed 1469 * and access rights restored.
a54a407f 1470 */
c4088ebd 1471 spin_unlock(ptl);
b32967ff 1472 migrated = migrate_misplaced_transhuge_page(mm, vma,
340ef390 1473 pmdp, pmd, addr, page, target_nid);
6688cc05
PZ
1474 if (migrated) {
1475 flags |= TNF_MIGRATED;
8191acbd 1476 page_nid = target_nid;
074c2381
MG
1477 } else
1478 flags |= TNF_MIGRATE_FAIL;
b32967ff 1479
8191acbd 1480 goto out;
b32967ff 1481clear_pmdnuma:
a54a407f 1482 BUG_ON(!PageLocked(page));
b191f9b1 1483 was_writable = pmd_write(pmd);
4d942466 1484 pmd = pmd_modify(pmd, vma->vm_page_prot);
b7b04004 1485 pmd = pmd_mkyoung(pmd);
b191f9b1
MG
1486 if (was_writable)
1487 pmd = pmd_mkwrite(pmd);
d10e63f2 1488 set_pmd_at(mm, haddr, pmdp, pmd);
d10e63f2 1489 update_mmu_cache_pmd(vma, addr, pmdp);
a54a407f 1490 unlock_page(page);
d10e63f2 1491out_unlock:
c4088ebd 1492 spin_unlock(ptl);
b8916634
MG
1493
1494out:
1495 if (anon_vma)
1496 page_unlock_anon_vma_read(anon_vma);
1497
8191acbd 1498 if (page_nid != -1)
6688cc05 1499 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
8191acbd 1500
d10e63f2
MG
1501 return 0;
1502}
1503
71e3aac0 1504int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
f21760b1 1505 pmd_t *pmd, unsigned long addr)
71e3aac0 1506{
da146769 1507 pmd_t orig_pmd;
bf929152 1508 spinlock_t *ptl;
71e3aac0 1509
4b471e88 1510 if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
da146769
KS
1511 return 0;
1512 /*
1513 * For architectures like ppc64 we look at deposited pgtable
1514 * when calling pmdp_huge_get_and_clear. So do the
1515 * pgtable_trans_huge_withdraw after finishing pmdp related
1516 * operations.
1517 */
1518 orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1519 tlb->fullmm);
1520 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1521 if (vma_is_dax(vma)) {
1522 spin_unlock(ptl);
1523 if (is_huge_zero_pmd(orig_pmd))
97ae1749 1524 put_huge_zero_page();
da146769
KS
1525 } else if (is_huge_zero_pmd(orig_pmd)) {
1526 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1527 atomic_long_dec(&tlb->mm->nr_ptes);
1528 spin_unlock(ptl);
1529 put_huge_zero_page();
1530 } else {
1531 struct page *page = pmd_page(orig_pmd);
d281ee61 1532 page_remove_rmap(page, true);
da146769
KS
1533 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1534 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1535 VM_BUG_ON_PAGE(!PageHead(page), page);
1536 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1537 atomic_long_dec(&tlb->mm->nr_ptes);
1538 spin_unlock(ptl);
1539 tlb_remove_page(tlb, page);
025c5b24 1540 }
da146769 1541 return 1;
71e3aac0
AA
1542}
1543
4b471e88 1544bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
37a1c49a
AA
1545 unsigned long old_addr,
1546 unsigned long new_addr, unsigned long old_end,
1547 pmd_t *old_pmd, pmd_t *new_pmd)
1548{
bf929152 1549 spinlock_t *old_ptl, *new_ptl;
37a1c49a
AA
1550 pmd_t pmd;
1551
1552 struct mm_struct *mm = vma->vm_mm;
1553
1554 if ((old_addr & ~HPAGE_PMD_MASK) ||
1555 (new_addr & ~HPAGE_PMD_MASK) ||
1556 old_end - old_addr < HPAGE_PMD_SIZE ||
1557 (new_vma->vm_flags & VM_NOHUGEPAGE))
4b471e88 1558 return false;
37a1c49a
AA
1559
1560 /*
1561 * The destination pmd shouldn't be established, free_pgtables()
1562 * should have release it.
1563 */
1564 if (WARN_ON(!pmd_none(*new_pmd))) {
1565 VM_BUG_ON(pmd_trans_huge(*new_pmd));
4b471e88 1566 return false;
37a1c49a
AA
1567 }
1568
bf929152
KS
1569 /*
1570 * We don't have to worry about the ordering of src and dst
1571 * ptlocks because exclusive mmap_sem prevents deadlock.
1572 */
4b471e88 1573 if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
bf929152
KS
1574 new_ptl = pmd_lockptr(mm, new_pmd);
1575 if (new_ptl != old_ptl)
1576 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
8809aa2d 1577 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
025c5b24 1578 VM_BUG_ON(!pmd_none(*new_pmd));
3592806c 1579
b3084f4d
AK
1580 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1581 pgtable_t pgtable;
3592806c
KS
1582 pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1583 pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
3592806c 1584 }
b3084f4d
AK
1585 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1586 if (new_ptl != old_ptl)
1587 spin_unlock(new_ptl);
bf929152 1588 spin_unlock(old_ptl);
4b471e88 1589 return true;
37a1c49a 1590 }
4b471e88 1591 return false;
37a1c49a
AA
1592}
1593
f123d74a
MG
1594/*
1595 * Returns
1596 * - 0 if PMD could not be locked
1597 * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1598 * - HPAGE_PMD_NR if protections changed and TLB flush necessary
1599 */
cd7548ab 1600int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
e944fd67 1601 unsigned long addr, pgprot_t newprot, int prot_numa)
cd7548ab
JW
1602{
1603 struct mm_struct *mm = vma->vm_mm;
bf929152 1604 spinlock_t *ptl;
cd7548ab
JW
1605 int ret = 0;
1606
4b471e88 1607 if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
025c5b24 1608 pmd_t entry;
b191f9b1 1609 bool preserve_write = prot_numa && pmd_write(*pmd);
ba68bc01 1610 ret = 1;
e944fd67
MG
1611
1612 /*
1613 * Avoid trapping faults against the zero page. The read-only
1614 * data is likely to be read-cached on the local CPU and
1615 * local/remote hits to the zero page are not interesting.
1616 */
1617 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1618 spin_unlock(ptl);
ba68bc01 1619 return ret;
e944fd67
MG
1620 }
1621
10c1045f 1622 if (!prot_numa || !pmd_protnone(*pmd)) {
8809aa2d 1623 entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
10c1045f 1624 entry = pmd_modify(entry, newprot);
b191f9b1
MG
1625 if (preserve_write)
1626 entry = pmd_mkwrite(entry);
10c1045f
MG
1627 ret = HPAGE_PMD_NR;
1628 set_pmd_at(mm, addr, pmd, entry);
b191f9b1 1629 BUG_ON(!preserve_write && pmd_write(entry));
10c1045f 1630 }
bf929152 1631 spin_unlock(ptl);
1632 }
1633
1634 return ret;
1635}
1636
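/*
 * Editorial illustration (not part of this file): this is the path taken when
 * userspace mprotect()s a PMD-mapped huge page -- the whole 2 MiB range can
 * be switched with a single pmd update.  Hypothetical example, assuming a
 * 2 MiB huge page size:
 */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);
	memset(p, 1, len);		/* fault in; may be a single huge page */
	/* if it is, change_huge_pmd() handles this with one entry + one flush */
	if (mprotect(p, len, PROT_READ))
		return 1;
	return munmap(p, len);
}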
1637/*
1638 * Returns true if a given pmd maps a thp, false otherwise.
1639 *
1640 * Note that if it returns true, this routine returns without unlocking the
1641 * page table lock. So callers must unlock it.
025c5b24 1642 */
4b471e88 1643bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
bf929152 1644 spinlock_t **ptl)
025c5b24 1645{
bf929152 1646 *ptl = pmd_lock(vma->vm_mm, pmd);
4b471e88
KS
1647 if (likely(pmd_trans_huge(*pmd)))
1648 return true;
bf929152 1649 spin_unlock(*ptl);
4b471e88 1650 return false;
cd7548ab
JW
1651}
1652
9050d7eb 1653#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
78f11a25 1654
60ab3244
AA
1655int hugepage_madvise(struct vm_area_struct *vma,
1656 unsigned long *vm_flags, int advice)
0af4e98b 1657{
a664b2d8
AA
1658 switch (advice) {
1659 case MADV_HUGEPAGE:
1e1836e8
AT
1660#ifdef CONFIG_S390
1661 /*
1662 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
1663 * can't handle this properly after s390_enable_sie, so we simply
1664 * ignore the madvise to prevent qemu from causing a SIGSEGV.
1665 */
1666 if (mm_has_pgste(vma->vm_mm))
1667 return 0;
1668#endif
a664b2d8
AA
1669 /*
1670 * Be somewhat over-protective like KSM for now!
1671 */
1a763615 1672 if (*vm_flags & VM_NO_THP)
a664b2d8
AA
1673 return -EINVAL;
1674 *vm_flags &= ~VM_NOHUGEPAGE;
1675 *vm_flags |= VM_HUGEPAGE;
60ab3244
AA
1676 /*
1677 * If the vma becomes good for khugepaged to scan,
1678 * register it here without waiting for a page fault that
1679 * may not happen any time soon.
1680 */
6d50e60c 1681 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
60ab3244 1682 return -ENOMEM;
a664b2d8
AA
1683 break;
1684 case MADV_NOHUGEPAGE:
1685 /*
1686 * Be somewhat over-protective like KSM for now!
1687 */
1a763615 1688 if (*vm_flags & VM_NO_THP)
a664b2d8
AA
1689 return -EINVAL;
1690 *vm_flags &= ~VM_HUGEPAGE;
1691 *vm_flags |= VM_NOHUGEPAGE;
60ab3244
AA
1692 /*
1693 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1694 * this vma even though the mm stays registered in khugepaged if
1695 * it got registered before VM_NOHUGEPAGE was set.
1696 */
a664b2d8
AA
1697 break;
1698 }
0af4e98b
AA
1699
1700 return 0;
1701}
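/*
 * Editorial sketch (not in the original): from userspace this path is
 * normally reached through madvise(2); something like the following marks
 * an anonymous mapping as a THP candidate and, if khugepaged is enabled,
 * registers the mm for scanning (buffer size is illustrative only):
 *
 *	void *buf = mmap(NULL, 4UL << 20, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, 4UL << 20, MADV_HUGEPAGE);
 */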
1702
ba76149f
AA
1703static int __init khugepaged_slab_init(void)
1704{
1705 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1706 sizeof(struct mm_slot),
1707 __alignof__(struct mm_slot), 0, NULL);
1708 if (!mm_slot_cache)
1709 return -ENOMEM;
1710
1711 return 0;
1712}
1713
65ebb64f
KS
1714static void __init khugepaged_slab_exit(void)
1715{
1716 kmem_cache_destroy(mm_slot_cache);
1717}
1718
ba76149f
AA
1719static inline struct mm_slot *alloc_mm_slot(void)
1720{
1721 if (!mm_slot_cache) /* initialization failed */
1722 return NULL;
1723 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1724}
1725
1726static inline void free_mm_slot(struct mm_slot *mm_slot)
1727{
1728 kmem_cache_free(mm_slot_cache, mm_slot);
1729}
1730
ba76149f
AA
1731static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1732{
1733 struct mm_slot *mm_slot;
ba76149f 1734
b67bfe0d 1735 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
ba76149f
AA
1736 if (mm == mm_slot->mm)
1737 return mm_slot;
43b5fbbd 1738
ba76149f
AA
1739 return NULL;
1740}
1741
1742static void insert_to_mm_slots_hash(struct mm_struct *mm,
1743 struct mm_slot *mm_slot)
1744{
ba76149f 1745 mm_slot->mm = mm;
43b5fbbd 1746 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
ba76149f
AA
1747}
1748
1749static inline int khugepaged_test_exit(struct mm_struct *mm)
1750{
1751 return atomic_read(&mm->mm_users) == 0;
1752}
1753
1754int __khugepaged_enter(struct mm_struct *mm)
1755{
1756 struct mm_slot *mm_slot;
1757 int wakeup;
1758
1759 mm_slot = alloc_mm_slot();
1760 if (!mm_slot)
1761 return -ENOMEM;
1762
1763 /* __khugepaged_exit() must not run from under us */
96dad67f 1764 VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
ba76149f
AA
1765 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1766 free_mm_slot(mm_slot);
1767 return 0;
1768 }
1769
1770 spin_lock(&khugepaged_mm_lock);
1771 insert_to_mm_slots_hash(mm, mm_slot);
1772 /*
1773 * Insert just behind the scanning cursor, to let the area settle
1774 * down a little.
1775 */
1776 wakeup = list_empty(&khugepaged_scan.mm_head);
1777 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1778 spin_unlock(&khugepaged_mm_lock);
1779
1780 atomic_inc(&mm->mm_count);
1781 if (wakeup)
1782 wake_up_interruptible(&khugepaged_wait);
1783
1784 return 0;
1785}
1786
6d50e60c
DR
1787int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
1788 unsigned long vm_flags)
ba76149f
AA
1789{
1790 unsigned long hstart, hend;
1791 if (!vma->anon_vma)
1792 /*
1793 * Not yet faulted in so we will register later in the
1794 * page fault if needed.
1795 */
1796 return 0;
78f11a25 1797 if (vma->vm_ops)
ba76149f
AA
1798 /* khugepaged not yet working on file or special mappings */
1799 return 0;
6d50e60c 1800 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
ba76149f
AA
1801 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1802 hend = vma->vm_end & HPAGE_PMD_MASK;
1803 if (hstart < hend)
6d50e60c 1804 return khugepaged_enter(vma, vm_flags);
ba76149f
AA
1805 return 0;
1806}
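/*
 * Editorial worked example for the alignment check above (values are
 * illustrative; assumes 2MB huge pages, i.e. ~HPAGE_PMD_MASK == 0x1fffff):
 * for a vma spanning [0x1234000, 0x2600000), hstart rounds up to 0x1400000
 * and hend rounds down to 0x2600000, so hstart < hend and at least one
 * fully aligned 2MB range fits -- the vma is worth registering.  A vma too
 * small to contain an aligned huge page after rounding is skipped.
 */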
1807
1808void __khugepaged_exit(struct mm_struct *mm)
1809{
1810 struct mm_slot *mm_slot;
1811 int free = 0;
1812
1813 spin_lock(&khugepaged_mm_lock);
1814 mm_slot = get_mm_slot(mm);
1815 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
43b5fbbd 1816 hash_del(&mm_slot->hash);
ba76149f
AA
1817 list_del(&mm_slot->mm_node);
1818 free = 1;
1819 }
d788e80a 1820 spin_unlock(&khugepaged_mm_lock);
ba76149f
AA
1821
1822 if (free) {
ba76149f
AA
1823 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1824 free_mm_slot(mm_slot);
1825 mmdrop(mm);
1826 } else if (mm_slot) {
ba76149f
AA
1827 /*
1828 * This is required to serialize against
1829 * khugepaged_test_exit() (which is guaranteed to run
1830 * under mmap sem read mode). Stop here (after we
1831 * return, all pagetables will be destroyed) until
1832 * khugepaged has finished working on the pagetables
1833 * under the mmap_sem.
1834 */
1835 down_write(&mm->mmap_sem);
1836 up_write(&mm->mmap_sem);
d788e80a 1837 }
ba76149f
AA
1838}
1839
1840static void release_pte_page(struct page *page)
1841{
1842 /* 0 stands for page_is_file_cache(page) == false */
1843 dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1844 unlock_page(page);
1845 putback_lru_page(page);
1846}
1847
1848static void release_pte_pages(pte_t *pte, pte_t *_pte)
1849{
1850 while (--_pte >= pte) {
1851 pte_t pteval = *_pte;
ca0984ca 1852 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
ba76149f
AA
1853 release_pte_page(pte_page(pteval));
1854 }
1855}
1856
ba76149f
AA
1857static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1858 unsigned long address,
1859 pte_t *pte)
1860{
7d2eba05 1861 struct page *page = NULL;
ba76149f 1862 pte_t *_pte;
7d2eba05 1863 int none_or_zero = 0, result = 0;
10359213 1864 bool referenced = false, writable = false;
7d2eba05 1865
ba76149f
AA
1866 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1867 _pte++, address += PAGE_SIZE) {
1868 pte_t pteval = *_pte;
47aee4d8
MK
1869 if (pte_none(pteval) || (pte_present(pteval) &&
1870 is_zero_pfn(pte_pfn(pteval)))) {
c1294d05 1871 if (!userfaultfd_armed(vma) &&
7d2eba05 1872 ++none_or_zero <= khugepaged_max_ptes_none) {
ba76149f 1873 continue;
7d2eba05
EA
1874 } else {
1875 result = SCAN_EXCEED_NONE_PTE;
ba76149f 1876 goto out;
7d2eba05 1877 }
ba76149f 1878 }
7d2eba05
EA
1879 if (!pte_present(pteval)) {
1880 result = SCAN_PTE_NON_PRESENT;
ba76149f 1881 goto out;
7d2eba05 1882 }
ba76149f 1883 page = vm_normal_page(vma, address, pteval);
7d2eba05
EA
1884 if (unlikely(!page)) {
1885 result = SCAN_PAGE_NULL;
ba76149f 1886 goto out;
7d2eba05 1887 }
344aa35c 1888
309381fe
SL
1889 VM_BUG_ON_PAGE(PageCompound(page), page);
1890 VM_BUG_ON_PAGE(!PageAnon(page), page);
1891 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
ba76149f 1892
ba76149f
AA
1893 /*
1894 * We can do it before isolate_lru_page because the
1895 * page can't be freed from under us. NOTE: PG_lock
1896 * is needed to serialize against split_huge_page
1897 * when invoked from the VM.
1898 */
7d2eba05
EA
1899 if (!trylock_page(page)) {
1900 result = SCAN_PAGE_LOCK;
ba76149f 1901 goto out;
7d2eba05 1902 }
10359213
EA
1903
1904 /*
1905 * cannot use mapcount: can't collapse if there's a gup pin.
1906 * The page must only be referenced by the scanned process
1907 * and page swap cache.
1908 */
1909 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1910 unlock_page(page);
7d2eba05 1911 result = SCAN_PAGE_COUNT;
10359213
EA
1912 goto out;
1913 }
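/*
 * Editorial worked example (not in the original): with the pte mapping
 * holding one page reference, a page mapped only here and not in the swap
 * cache has page_count() == 1, and one that is also in the swap cache has
 * page_count() == 2; both match 1 + !!PageSwapCache(page).  Any additional
 * reference, e.g. a gup pin, makes the counts disagree and the collapse is
 * aborted above.
 */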
1914 if (pte_write(pteval)) {
1915 writable = true;
1916 } else {
1917 if (PageSwapCache(page) && !reuse_swap_page(page)) {
1918 unlock_page(page);
7d2eba05 1919 result = SCAN_SWAP_CACHE_PAGE;
10359213
EA
1920 goto out;
1921 }
1922 /*
1923 * Page is not in the swap cache. It can be collapsed
1924 * into a THP.
1925 */
1926 }
1927
ba76149f
AA
1928 /*
1929 * Isolate the page to avoid collapsing a hugepage
1930 * currently in use by the VM.
1931 */
1932 if (isolate_lru_page(page)) {
1933 unlock_page(page);
7d2eba05 1934 result = SCAN_DEL_PAGE_LRU;
ba76149f
AA
1935 goto out;
1936 }
1937 /* 0 stands for page_is_file_cache(page) == false */
1938 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
309381fe
SL
1939 VM_BUG_ON_PAGE(!PageLocked(page), page);
1940 VM_BUG_ON_PAGE(PageLRU(page), page);
ba76149f
AA
1941
1942 /* If no mapped pte is young, don't collapse the page */
33c3fc71
VD
1943 if (pte_young(pteval) ||
1944 page_is_young(page) || PageReferenced(page) ||
8ee53820 1945 mmu_notifier_test_young(vma->vm_mm, address))
10359213 1946 referenced = true;
ba76149f 1947 }
7d2eba05
EA
1948 if (likely(writable)) {
1949 if (likely(referenced)) {
1950 result = SCAN_SUCCEED;
1951 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
1952 referenced, writable, result);
1953 return 1;
1954 }
1955 } else {
1956 result = SCAN_PAGE_RO;
1957 }
1958
ba76149f 1959out:
344aa35c 1960 release_pte_pages(pte, _pte);
7d2eba05
EA
1961 trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero,
1962 referenced, writable, result);
344aa35c 1963 return 0;
ba76149f
AA
1964}
1965
1966static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1967 struct vm_area_struct *vma,
1968 unsigned long address,
1969 spinlock_t *ptl)
1970{
1971 pte_t *_pte;
1972 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1973 pte_t pteval = *_pte;
1974 struct page *src_page;
1975
ca0984ca 1976 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
ba76149f
AA
1977 clear_user_highpage(page, address);
1978 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
ca0984ca
EA
1979 if (is_zero_pfn(pte_pfn(pteval))) {
1980 /*
1981 * ptl mostly unnecessary.
1982 */
1983 spin_lock(ptl);
1984 /*
1985 * paravirt calls inside pte_clear here are
1986 * superfluous.
1987 */
1988 pte_clear(vma->vm_mm, address, _pte);
1989 spin_unlock(ptl);
1990 }
ba76149f
AA
1991 } else {
1992 src_page = pte_page(pteval);
1993 copy_user_highpage(page, src_page, address, vma);
309381fe 1994 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
ba76149f
AA
1995 release_pte_page(src_page);
1996 /*
1997 * ptl mostly unnecessary, but preempt has to
1998 * be disabled to update the per-cpu stats
1999 * inside page_remove_rmap().
2000 */
2001 spin_lock(ptl);
2002 /*
2003 * paravirt calls inside pte_clear here are
2004 * superfluous.
2005 */
2006 pte_clear(vma->vm_mm, address, _pte);
d281ee61 2007 page_remove_rmap(src_page, false);
ba76149f
AA
2008 spin_unlock(ptl);
2009 free_page_and_swap_cache(src_page);
2010 }
2011
2012 address += PAGE_SIZE;
2013 page++;
2014 }
2015}
2016
26234f36 2017static void khugepaged_alloc_sleep(void)
ba76149f 2018{
bde43c6c
PM
2019 DEFINE_WAIT(wait);
2020
2021 add_wait_queue(&khugepaged_wait, &wait);
2022 freezable_schedule_timeout_interruptible(
2023 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2024 remove_wait_queue(&khugepaged_wait, &wait);
26234f36 2025}
ba76149f 2026
9f1b868a
BL
2027static int khugepaged_node_load[MAX_NUMNODES];
2028
14a4e214
DR
2029static bool khugepaged_scan_abort(int nid)
2030{
2031 int i;
2032
2033 /*
2034 * If zone_reclaim_mode is disabled, then no extra effort is made to
2035 * allocate memory locally.
2036 */
2037 if (!zone_reclaim_mode)
2038 return false;
2039
2040 /* If there is a count for this node already, it must be acceptable */
2041 if (khugepaged_node_load[nid])
2042 return false;
2043
2044 for (i = 0; i < MAX_NUMNODES; i++) {
2045 if (!khugepaged_node_load[i])
2046 continue;
2047 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2048 return true;
2049 }
2050 return false;
2051}
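/*
 * Editorial note (assumption about typical values, not from the original):
 * with common SLIT defaults a local node reports distance 10 and a directly
 * attached remote node roughly 20-21, while RECLAIM_DISTANCE defaults to 30.
 * So even with zone_reclaim_mode enabled, the loop above never aborts the
 * scan on a two-socket machine; it only trips on larger topologies where the
 * pages seen so far live on nodes farther apart than RECLAIM_DISTANCE.
 */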
2052
26234f36 2053#ifdef CONFIG_NUMA
9f1b868a
BL
2054static int khugepaged_find_target_node(void)
2055{
2056 static int last_khugepaged_target_node = NUMA_NO_NODE;
2057 int nid, target_node = 0, max_value = 0;
2058
2059 /* find first node with max normal pages hit */
2060 for (nid = 0; nid < MAX_NUMNODES; nid++)
2061 if (khugepaged_node_load[nid] > max_value) {
2062 max_value = khugepaged_node_load[nid];
2063 target_node = nid;
2064 }
2065
2066 /* do some balancing if several nodes have the same hit record */
2067 if (target_node <= last_khugepaged_target_node)
2068 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2069 nid++)
2070 if (max_value == khugepaged_node_load[nid]) {
2071 target_node = nid;
2072 break;
2073 }
2074
2075 last_khugepaged_target_node = target_node;
2076 return target_node;
2077}
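/*
 * Editorial worked example (values illustrative, not from the original):
 * with khugepaged_node_load[] = {3, 5, 5, 0} the first loop above picks
 * node 1 (the first node holding the maximum of 5).  If the previous call
 * also returned node 1, the balancing loop then walks from node 2 onwards
 * and moves the target to node 2, which ties at 5, so repeated collapses
 * with identical loads spread their allocations across the tied nodes.
 */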
2078
26234f36
XG
2079static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2080{
2081 if (IS_ERR(*hpage)) {
2082 if (!*wait)
2083 return false;
2084
2085 *wait = false;
e3b4126c 2086 *hpage = NULL;
26234f36
XG
2087 khugepaged_alloc_sleep();
2088 } else if (*hpage) {
2089 put_page(*hpage);
2090 *hpage = NULL;
2091 }
2092
2093 return true;
2094}
2095
3b363692
MH
2096static struct page *
2097khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
d6669d68 2098 unsigned long address, int node)
26234f36 2099{
309381fe 2100 VM_BUG_ON_PAGE(*hpage, *hpage);
8b164568 2101
ce83d217 2102 /*
8b164568
VB
2103 * Before allocating the hugepage, release the mmap_sem read lock.
2104 * The allocation can take potentially a long time if it involves
2105 * sync compaction, and we do not need to hold the mmap_sem during
2106 * that. We will recheck the vma after taking it again in write mode.
ce83d217 2107 */
8b164568
VB
2108 up_read(&mm->mmap_sem);
2109
96db800f 2110 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
26234f36 2111 if (unlikely(!*hpage)) {
81ab4201 2112 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
ce83d217 2113 *hpage = ERR_PTR(-ENOMEM);
26234f36 2114 return NULL;
ce83d217 2115 }
26234f36 2116
9a982250 2117 prep_transhuge_page(*hpage);
65b3c07b 2118 count_vm_event(THP_COLLAPSE_ALLOC);
26234f36
XG
2119 return *hpage;
2120}
2121#else
9f1b868a
BL
2122static int khugepaged_find_target_node(void)
2123{
2124 return 0;
2125}
2126
10dc4155
BL
2127static inline struct page *alloc_hugepage(int defrag)
2128{
9a982250
KS
2129 struct page *page;
2130
2131 page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER);
2132 if (page)
2133 prep_transhuge_page(page);
2134 return page;
10dc4155
BL
2135}
2136
26234f36
XG
2137static struct page *khugepaged_alloc_hugepage(bool *wait)
2138{
2139 struct page *hpage;
2140
2141 do {
2142 hpage = alloc_hugepage(khugepaged_defrag());
2143 if (!hpage) {
2144 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2145 if (!*wait)
2146 return NULL;
2147
2148 *wait = false;
2149 khugepaged_alloc_sleep();
2150 } else
2151 count_vm_event(THP_COLLAPSE_ALLOC);
2152 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2153
2154 return hpage;
2155}
2156
2157static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2158{
2159 if (!*hpage)
2160 *hpage = khugepaged_alloc_hugepage(wait);
2161
2162 if (unlikely(!*hpage))
2163 return false;
2164
2165 return true;
2166}
2167
3b363692
MH
2168static struct page *
2169khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
d6669d68 2170 unsigned long address, int node)
26234f36
XG
2171{
2172 up_read(&mm->mmap_sem);
2173 VM_BUG_ON(!*hpage);
3b363692 2174
26234f36
XG
2175 return *hpage;
2176}
692e0b35
AA
2177#endif
2178
fa475e51
BL
2179static bool hugepage_vma_check(struct vm_area_struct *vma)
2180{
2181 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2182 (vma->vm_flags & VM_NOHUGEPAGE))
2183 return false;
fa475e51
BL
2184 if (!vma->anon_vma || vma->vm_ops)
2185 return false;
2186 if (is_vma_temporary_stack(vma))
2187 return false;
81d1b09c 2188 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
fa475e51
BL
2189 return true;
2190}
2191
26234f36
XG
2192static void collapse_huge_page(struct mm_struct *mm,
2193 unsigned long address,
2194 struct page **hpage,
2195 struct vm_area_struct *vma,
2196 int node)
2197{
26234f36
XG
2198 pmd_t *pmd, _pmd;
2199 pte_t *pte;
2200 pgtable_t pgtable;
2201 struct page *new_page;
c4088ebd 2202 spinlock_t *pmd_ptl, *pte_ptl;
7d2eba05 2203 int isolated = 0, result = 0;
26234f36 2204 unsigned long hstart, hend;
00501b53 2205 struct mem_cgroup *memcg;
2ec74c3e
SG
2206 unsigned long mmun_start; /* For mmu_notifiers */
2207 unsigned long mmun_end; /* For mmu_notifiers */
3b363692 2208 gfp_t gfp;
26234f36
XG
2209
2210 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2211
3b363692
MH
2212 /* Only allocate from the target node */
2213 gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2214 __GFP_THISNODE;
2215
26234f36 2216 /* release the mmap_sem read lock. */
d6669d68 2217 new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
7d2eba05
EA
2218 if (!new_page) {
2219 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
2220 goto out_nolock;
2221 }
26234f36 2222
f627c2f5 2223 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
7d2eba05
EA
2224 result = SCAN_CGROUP_CHARGE_FAIL;
2225 goto out_nolock;
2226 }
ba76149f
AA
2227
2228 /*
2229 * Prevent all access to pagetables with the exception of
2230 * gup_fast later handled by the ptep_clear_flush and the VM
2231 * handled by the anon_vma lock + PG_lock.
2232 */
2233 down_write(&mm->mmap_sem);
7d2eba05
EA
2234 if (unlikely(khugepaged_test_exit(mm))) {
2235 result = SCAN_ANY_PROCESS;
ba76149f 2236 goto out;
7d2eba05 2237 }
ba76149f
AA
2238
2239 vma = find_vma(mm, address);
7d2eba05
EA
2240 if (!vma) {
2241 result = SCAN_VMA_NULL;
a8f531eb 2242 goto out;
7d2eba05 2243 }
ba76149f
AA
2244 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2245 hend = vma->vm_end & HPAGE_PMD_MASK;
7d2eba05
EA
2246 if (address < hstart || address + HPAGE_PMD_SIZE > hend) {
2247 result = SCAN_ADDRESS_RANGE;
ba76149f 2248 goto out;
7d2eba05
EA
2249 }
2250 if (!hugepage_vma_check(vma)) {
2251 result = SCAN_VMA_CHECK;
a7d6e4ec 2252 goto out;
7d2eba05 2253 }
6219049a 2254 pmd = mm_find_pmd(mm, address);
7d2eba05
EA
2255 if (!pmd) {
2256 result = SCAN_PMD_NULL;
ba76149f 2257 goto out;
7d2eba05 2258 }
ba76149f 2259
4fc3f1d6 2260 anon_vma_lock_write(vma->anon_vma);
ba76149f
AA
2261
2262 pte = pte_offset_map(pmd, address);
c4088ebd 2263 pte_ptl = pte_lockptr(mm, pmd);
ba76149f 2264
2ec74c3e
SG
2265 mmun_start = address;
2266 mmun_end = address + HPAGE_PMD_SIZE;
2267 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
c4088ebd 2268 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
ba76149f
AA
2269 /*
2270 * After this gup_fast can't run anymore. This also removes
2271 * any huge TLB entry from the CPU so we won't allow
2272 * huge and small TLB entries for the same virtual address
2273 * to avoid the risk of CPU bugs in that area.
2274 */
15a25b2e 2275 _pmd = pmdp_collapse_flush(vma, address, pmd);
c4088ebd 2276 spin_unlock(pmd_ptl);
2ec74c3e 2277 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
ba76149f 2278
c4088ebd 2279 spin_lock(pte_ptl);
ba76149f 2280 isolated = __collapse_huge_page_isolate(vma, address, pte);
c4088ebd 2281 spin_unlock(pte_ptl);
ba76149f
AA
2282
2283 if (unlikely(!isolated)) {
453c7192 2284 pte_unmap(pte);
c4088ebd 2285 spin_lock(pmd_ptl);
ba76149f 2286 BUG_ON(!pmd_none(*pmd));
7c342512
AK
2287 /*
2288 * We can only use set_pmd_at when establishing
2289 * hugepmds and never for establishing regular pmds that
2290 * point to regular pagetables. Use pmd_populate for that
2291 */
2292 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
c4088ebd 2293 spin_unlock(pmd_ptl);
08b52706 2294 anon_vma_unlock_write(vma->anon_vma);
7d2eba05 2295 result = SCAN_FAIL;
ce83d217 2296 goto out;
ba76149f
AA
2297 }
2298
2299 /*
2300 * All pages are isolated and locked so anon_vma rmap
2301 * can't run anymore.
2302 */
08b52706 2303 anon_vma_unlock_write(vma->anon_vma);
ba76149f 2304
c4088ebd 2305 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
453c7192 2306 pte_unmap(pte);
ba76149f
AA
2307 __SetPageUptodate(new_page);
2308 pgtable = pmd_pgtable(_pmd);
ba76149f 2309
3122359a
KS
2310 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2311 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
ba76149f
AA
2312
2313 /*
2314 * spin_lock() below is not the equivalent of smp_wmb(), so
2315 * this is needed to prevent the copy_huge_page writes from becoming
2316 * visible after the set_pmd_at() write.
2317 */
2318 smp_wmb();
2319
c4088ebd 2320 spin_lock(pmd_ptl);
ba76149f 2321 BUG_ON(!pmd_none(*pmd));
d281ee61 2322 page_add_new_anon_rmap(new_page, vma, address, true);
f627c2f5 2323 mem_cgroup_commit_charge(new_page, memcg, false, true);
00501b53 2324 lru_cache_add_active_or_unevictable(new_page, vma);
fce144b4 2325 pgtable_trans_huge_deposit(mm, pmd, pgtable);
ba76149f 2326 set_pmd_at(mm, address, pmd, _pmd);
b113da65 2327 update_mmu_cache_pmd(vma, address, pmd);
c4088ebd 2328 spin_unlock(pmd_ptl);
ba76149f
AA
2329
2330 *hpage = NULL;
420256ef 2331
ba76149f 2332 khugepaged_pages_collapsed++;
7d2eba05 2333 result = SCAN_SUCCEED;
ce83d217 2334out_up_write:
ba76149f 2335 up_write(&mm->mmap_sem);
7d2eba05 2336 trace_mm_collapse_huge_page(mm, isolated, result);
0bbbc0b3
AA
2337 return;
2338
7d2eba05
EA
2339out_nolock:
2340 trace_mm_collapse_huge_page(mm, isolated, result);
2341 return;
ce83d217 2342out:
f627c2f5 2343 mem_cgroup_cancel_charge(new_page, memcg, true);
ce83d217 2344 goto out_up_write;
ba76149f
AA
2345}
2346
2347static int khugepaged_scan_pmd(struct mm_struct *mm,
2348 struct vm_area_struct *vma,
2349 unsigned long address,
2350 struct page **hpage)
2351{
ba76149f
AA
2352 pmd_t *pmd;
2353 pte_t *pte, *_pte;
7d2eba05
EA
2354 int ret = 0, none_or_zero = 0, result = 0;
2355 struct page *page = NULL;
ba76149f
AA
2356 unsigned long _address;
2357 spinlock_t *ptl;
00ef2d2f 2358 int node = NUMA_NO_NODE;
10359213 2359 bool writable = false, referenced = false;
ba76149f
AA
2360
2361 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2362
6219049a 2363 pmd = mm_find_pmd(mm, address);
7d2eba05
EA
2364 if (!pmd) {
2365 result = SCAN_PMD_NULL;
ba76149f 2366 goto out;
7d2eba05 2367 }
ba76149f 2368
9f1b868a 2369 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
ba76149f
AA
2370 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2371 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2372 _pte++, _address += PAGE_SIZE) {
2373 pte_t pteval = *_pte;
ca0984ca 2374 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
c1294d05 2375 if (!userfaultfd_armed(vma) &&
7d2eba05 2376 ++none_or_zero <= khugepaged_max_ptes_none) {
ba76149f 2377 continue;
7d2eba05
EA
2378 } else {
2379 result = SCAN_EXCEED_NONE_PTE;
ba76149f 2380 goto out_unmap;
7d2eba05 2381 }
ba76149f 2382 }
7d2eba05
EA
2383 if (!pte_present(pteval)) {
2384 result = SCAN_PTE_NON_PRESENT;
ba76149f 2385 goto out_unmap;
7d2eba05 2386 }
10359213
EA
2387 if (pte_write(pteval))
2388 writable = true;
2389
ba76149f 2390 page = vm_normal_page(vma, _address, pteval);
7d2eba05
EA
2391 if (unlikely(!page)) {
2392 result = SCAN_PAGE_NULL;
ba76149f 2393 goto out_unmap;
7d2eba05 2394 }
b1caa957
KS
2395
2396 /* TODO: teach khugepaged to collapse THP mapped with pte */
2397 if (PageCompound(page)) {
2398 result = SCAN_PAGE_COMPOUND;
2399 goto out_unmap;
2400 }
2401
5c4b4be3 2402 /*
9f1b868a
BL
2403 * Record which node the original page is from and save this
2404 * information to khugepaged_node_load[].
2405 * Khugepaged will allocate the hugepage from the node with the max
2406 * hit record.
5c4b4be3 2407 */
9f1b868a 2408 node = page_to_nid(page);
7d2eba05
EA
2409 if (khugepaged_scan_abort(node)) {
2410 result = SCAN_SCAN_ABORT;
14a4e214 2411 goto out_unmap;
7d2eba05 2412 }
9f1b868a 2413 khugepaged_node_load[node]++;
7d2eba05
EA
2414 if (!PageLRU(page)) {
2415 result = SCAN_SCAN_ABORT;
2416 goto out_unmap;
2417 }
2418 if (PageLocked(page)) {
2419 result = SCAN_PAGE_LOCK;
ba76149f 2420 goto out_unmap;
7d2eba05
EA
2421 }
2422 if (!PageAnon(page)) {
2423 result = SCAN_PAGE_ANON;
2424 goto out_unmap;
2425 }
2426
10359213
EA
2427 /*
2428 * cannot use mapcount: can't collapse if there's a gup pin.
2429 * The page must only be referenced by the scanned process
2430 * and page swap cache.
2431 */
7d2eba05
EA
2432 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2433 result = SCAN_PAGE_COUNT;
ba76149f 2434 goto out_unmap;
7d2eba05 2435 }
33c3fc71
VD
2436 if (pte_young(pteval) ||
2437 page_is_young(page) || PageReferenced(page) ||
8ee53820 2438 mmu_notifier_test_young(vma->vm_mm, address))
10359213 2439 referenced = true;
ba76149f 2440 }
7d2eba05
EA
2441 if (writable) {
2442 if (referenced) {
2443 result = SCAN_SUCCEED;
2444 ret = 1;
2445 } else {
2446 result = SCAN_NO_REFERENCED_PAGE;
2447 }
2448 } else {
2449 result = SCAN_PAGE_RO;
2450 }
ba76149f
AA
2451out_unmap:
2452 pte_unmap_unlock(pte, ptl);
9f1b868a
BL
2453 if (ret) {
2454 node = khugepaged_find_target_node();
ce83d217 2455 /* collapse_huge_page will return with the mmap_sem released */
5c4b4be3 2456 collapse_huge_page(mm, address, hpage, vma, node);
9f1b868a 2457 }
ba76149f 2458out:
7d2eba05
EA
2459 trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced,
2460 none_or_zero, result);
ba76149f
AA
2461 return ret;
2462}
2463
2464static void collect_mm_slot(struct mm_slot *mm_slot)
2465{
2466 struct mm_struct *mm = mm_slot->mm;
2467
b9980cdc 2468 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
ba76149f
AA
2469
2470 if (khugepaged_test_exit(mm)) {
2471 /* free mm_slot */
43b5fbbd 2472 hash_del(&mm_slot->hash);
ba76149f
AA
2473 list_del(&mm_slot->mm_node);
2474
2475 /*
2476 * Not strictly needed because the mm exited already.
2477 *
2478 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2479 */
2480
2481 /* khugepaged_mm_lock actually not necessary for the below */
2482 free_mm_slot(mm_slot);
2483 mmdrop(mm);
2484 }
2485}
2486
2487static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2488 struct page **hpage)
2f1da642
HS
2489 __releases(&khugepaged_mm_lock)
2490 __acquires(&khugepaged_mm_lock)
ba76149f
AA
2491{
2492 struct mm_slot *mm_slot;
2493 struct mm_struct *mm;
2494 struct vm_area_struct *vma;
2495 int progress = 0;
2496
2497 VM_BUG_ON(!pages);
b9980cdc 2498 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
ba76149f
AA
2499
2500 if (khugepaged_scan.mm_slot)
2501 mm_slot = khugepaged_scan.mm_slot;
2502 else {
2503 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2504 struct mm_slot, mm_node);
2505 khugepaged_scan.address = 0;
2506 khugepaged_scan.mm_slot = mm_slot;
2507 }
2508 spin_unlock(&khugepaged_mm_lock);
2509
2510 mm = mm_slot->mm;
2511 down_read(&mm->mmap_sem);
2512 if (unlikely(khugepaged_test_exit(mm)))
2513 vma = NULL;
2514 else
2515 vma = find_vma(mm, khugepaged_scan.address);
2516
2517 progress++;
2518 for (; vma; vma = vma->vm_next) {
2519 unsigned long hstart, hend;
2520
2521 cond_resched();
2522 if (unlikely(khugepaged_test_exit(mm))) {
2523 progress++;
2524 break;
2525 }
fa475e51
BL
2526 if (!hugepage_vma_check(vma)) {
2527skip:
ba76149f
AA
2528 progress++;
2529 continue;
2530 }
ba76149f
AA
2531 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2532 hend = vma->vm_end & HPAGE_PMD_MASK;
a7d6e4ec
AA
2533 if (hstart >= hend)
2534 goto skip;
2535 if (khugepaged_scan.address > hend)
2536 goto skip;
ba76149f
AA
2537 if (khugepaged_scan.address < hstart)
2538 khugepaged_scan.address = hstart;
a7d6e4ec 2539 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
ba76149f
AA
2540
2541 while (khugepaged_scan.address < hend) {
2542 int ret;
2543 cond_resched();
2544 if (unlikely(khugepaged_test_exit(mm)))
2545 goto breakouterloop;
2546
2547 VM_BUG_ON(khugepaged_scan.address < hstart ||
2548 khugepaged_scan.address + HPAGE_PMD_SIZE >
2549 hend);
2550 ret = khugepaged_scan_pmd(mm, vma,
2551 khugepaged_scan.address,
2552 hpage);
2553 /* move to next address */
2554 khugepaged_scan.address += HPAGE_PMD_SIZE;
2555 progress += HPAGE_PMD_NR;
2556 if (ret)
2557 /* we released mmap_sem so break loop */
2558 goto breakouterloop_mmap_sem;
2559 if (progress >= pages)
2560 goto breakouterloop;
2561 }
2562 }
2563breakouterloop:
2564 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2565breakouterloop_mmap_sem:
2566
2567 spin_lock(&khugepaged_mm_lock);
a7d6e4ec 2568 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
ba76149f
AA
2569 /*
2570 * Release the current mm_slot if this mm is about to die, or
2571 * if we scanned all vmas of this mm.
2572 */
2573 if (khugepaged_test_exit(mm) || !vma) {
2574 /*
2575 * Make sure that if mm_users is reaching zero while
2576 * khugepaged runs here, khugepaged_exit will find
2577 * mm_slot not pointing to the exiting mm.
2578 */
2579 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2580 khugepaged_scan.mm_slot = list_entry(
2581 mm_slot->mm_node.next,
2582 struct mm_slot, mm_node);
2583 khugepaged_scan.address = 0;
2584 } else {
2585 khugepaged_scan.mm_slot = NULL;
2586 khugepaged_full_scans++;
2587 }
2588
2589 collect_mm_slot(mm_slot);
2590 }
2591
2592 return progress;
2593}
2594
2595static int khugepaged_has_work(void)
2596{
2597 return !list_empty(&khugepaged_scan.mm_head) &&
2598 khugepaged_enabled();
2599}
2600
2601static int khugepaged_wait_event(void)
2602{
2603 return !list_empty(&khugepaged_scan.mm_head) ||
2017c0bf 2604 kthread_should_stop();
ba76149f
AA
2605}
2606
d516904b 2607static void khugepaged_do_scan(void)
ba76149f 2608{
d516904b 2609 struct page *hpage = NULL;
ba76149f
AA
2610 unsigned int progress = 0, pass_through_head = 0;
2611 unsigned int pages = khugepaged_pages_to_scan;
d516904b 2612 bool wait = true;
ba76149f
AA
2613
2614 barrier(); /* write khugepaged_pages_to_scan to local stack */
2615
2616 while (progress < pages) {
26234f36 2617 if (!khugepaged_prealloc_page(&hpage, &wait))
d516904b 2618 break;
26234f36 2619
420256ef 2620 cond_resched();
ba76149f 2621
cd092411 2622 if (unlikely(kthread_should_stop() || try_to_freeze()))
878aee7d
AA
2623 break;
2624
ba76149f
AA
2625 spin_lock(&khugepaged_mm_lock);
2626 if (!khugepaged_scan.mm_slot)
2627 pass_through_head++;
2628 if (khugepaged_has_work() &&
2629 pass_through_head < 2)
2630 progress += khugepaged_scan_mm_slot(pages - progress,
d516904b 2631 &hpage);
ba76149f
AA
2632 else
2633 progress = pages;
2634 spin_unlock(&khugepaged_mm_lock);
2635 }
ba76149f 2636
d516904b
XG
2637 if (!IS_ERR_OR_NULL(hpage))
2638 put_page(hpage);
0bbbc0b3
AA
2639}
2640
2017c0bf
XG
2641static void khugepaged_wait_work(void)
2642{
2017c0bf
XG
2643 if (khugepaged_has_work()) {
2644 if (!khugepaged_scan_sleep_millisecs)
2645 return;
2646
2647 wait_event_freezable_timeout(khugepaged_wait,
2648 kthread_should_stop(),
2649 msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2650 return;
2651 }
2652
2653 if (khugepaged_enabled())
2654 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2655}
2656
ba76149f
AA
2657static int khugepaged(void *none)
2658{
2659 struct mm_slot *mm_slot;
2660
878aee7d 2661 set_freezable();
8698a745 2662 set_user_nice(current, MAX_NICE);
ba76149f 2663
b7231789
XG
2664 while (!kthread_should_stop()) {
2665 khugepaged_do_scan();
2666 khugepaged_wait_work();
2667 }
ba76149f
AA
2668
2669 spin_lock(&khugepaged_mm_lock);
2670 mm_slot = khugepaged_scan.mm_slot;
2671 khugepaged_scan.mm_slot = NULL;
2672 if (mm_slot)
2673 collect_mm_slot(mm_slot);
2674 spin_unlock(&khugepaged_mm_lock);
ba76149f
AA
2675 return 0;
2676}
2677
eef1b3ba
KS
2678static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2679 unsigned long haddr, pmd_t *pmd)
2680{
2681 struct mm_struct *mm = vma->vm_mm;
2682 pgtable_t pgtable;
2683 pmd_t _pmd;
2684 int i;
2685
2686 /* leave pmd empty until pte is filled */
2687 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2688
2689 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2690 pmd_populate(mm, &_pmd, pgtable);
2691
2692 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2693 pte_t *pte, entry;
2694 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2695 entry = pte_mkspecial(entry);
2696 pte = pte_offset_map(&_pmd, haddr);
2697 VM_BUG_ON(!pte_none(*pte));
2698 set_pte_at(mm, haddr, pte, entry);
2699 pte_unmap(pte);
2700 }
2701 smp_wmb(); /* make pte visible before pmd */
2702 pmd_populate(mm, pmd, pgtable);
2703 put_huge_zero_page();
2704}
2705
2706static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
ba988280 2707 unsigned long haddr, bool freeze)
eef1b3ba
KS
2708{
2709 struct mm_struct *mm = vma->vm_mm;
2710 struct page *page;
2711 pgtable_t pgtable;
2712 pmd_t _pmd;
2713 bool young, write;
2714 int i;
2715
2716 VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
2717 VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
2718 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2719 VM_BUG_ON(!pmd_trans_huge(*pmd));
2720
2721 count_vm_event(THP_SPLIT_PMD);
2722
2723 if (vma_is_dax(vma)) {
2724 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2725 if (is_huge_zero_pmd(_pmd))
2726 put_huge_zero_page();
2727 return;
2728 } else if (is_huge_zero_pmd(*pmd)) {
2729 return __split_huge_zero_page_pmd(vma, haddr, pmd);
2730 }
2731
2732 page = pmd_page(*pmd);
2733 VM_BUG_ON_PAGE(!page_count(page), page);
2734 atomic_add(HPAGE_PMD_NR - 1, &page->_count);
2735 write = pmd_write(*pmd);
2736 young = pmd_young(*pmd);
2737
eef1b3ba
KS
2738 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2739 pmd_populate(mm, &_pmd, pgtable);
2740
2741 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2742 pte_t entry, *pte;
2743 /*
2744 * Note that NUMA hinting access restrictions are not
2745 * transferred to avoid any possibility of altering
2746 * permissions across VMAs.
2747 */
ba988280
KS
2748 if (freeze) {
2749 swp_entry_t swp_entry;
2750 swp_entry = make_migration_entry(page + i, write);
2751 entry = swp_entry_to_pte(swp_entry);
2752 } else {
2753 entry = mk_pte(page + i, vma->vm_page_prot);
2754 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2755 if (!write)
2756 entry = pte_wrprotect(entry);
2757 if (!young)
2758 entry = pte_mkold(entry);
2759 }
eef1b3ba
KS
2760 pte = pte_offset_map(&_pmd, haddr);
2761 BUG_ON(!pte_none(*pte));
2762 set_pte_at(mm, haddr, pte, entry);
2763 atomic_inc(&page[i]._mapcount);
2764 pte_unmap(pte);
2765 }
2766
2767 /*
2768 * Set PG_double_map before dropping compound_mapcount to avoid
2769 * false-negative page_mapped().
2770 */
2771 if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) {
2772 for (i = 0; i < HPAGE_PMD_NR; i++)
2773 atomic_inc(&page[i]._mapcount);
2774 }
2775
2776 if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
2777 /* Last compound_mapcount is gone. */
2778 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
2779 if (TestClearPageDoubleMap(page)) {
2780 /* No need in mapcount reference anymore */
2781 for (i = 0; i < HPAGE_PMD_NR; i++)
2782 atomic_dec(&page[i]._mapcount);
2783 }
2784 }
2785
2786 smp_wmb(); /* make pte visible before pmd */
e9b61f19
KS
2787 /*
2788 * Up to this point the pmd is present and huge and userland has the
2789 * whole access to the hugepage during the split (which happens in
2790 * place). If we overwrite the pmd with the not-huge version pointing
2791 * to the pte here (which of course we could if all CPUs were bug
2792 * free), userland could trigger a small page size TLB miss on the
2793 * small sized TLB while the hugepage TLB entry is still established in
2794 * the huge TLB. Some CPUs don't like that.
2795 * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
2796 * 383 on page 93. Intel should be safe but it also warns that it's
2797 * only safe if the permission and cache attributes of the two entries
2798 * loaded in the two TLBs are identical (which should be the case here).
2799 * But it is generally safer to never allow small and huge TLB entries
2800 * for the same virtual address to be loaded simultaneously. So instead
2801 * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
2802 * current pmd notpresent (atomically because here the pmd_trans_huge
2803 * and pmd_trans_splitting must remain set at all times on the pmd
2804 * until the split is complete for this pmd), then we flush the SMP TLB
2805 * and finally we write the non-huge version of the pmd entry with
2806 * pmd_populate.
2807 */
2808 pmdp_invalidate(vma, haddr, pmd);
eef1b3ba 2809 pmd_populate(mm, pmd, pgtable);
e9b61f19
KS
2810
2811 if (freeze) {
2812 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2813 page_remove_rmap(page + i, false);
2814 put_page(page + i);
2815 }
2816 }
eef1b3ba
KS
2817}
2818
2819void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2820 unsigned long address)
2821{
2822 spinlock_t *ptl;
2823 struct mm_struct *mm = vma->vm_mm;
e90309c9 2824 struct page *page = NULL;
eef1b3ba
KS
2825 unsigned long haddr = address & HPAGE_PMD_MASK;
2826
2827 mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
2828 ptl = pmd_lock(mm, pmd);
e90309c9
KS
2829 if (unlikely(!pmd_trans_huge(*pmd)))
2830 goto out;
2831 page = pmd_page(*pmd);
2832 __split_huge_pmd_locked(vma, pmd, haddr, false);
2833 if (PageMlocked(page))
2834 get_page(page);
2835 else
2836 page = NULL;
2837out:
eef1b3ba
KS
2838 spin_unlock(ptl);
2839 mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
e90309c9
KS
2840 if (page) {
2841 lock_page(page);
2842 munlock_vma_page(page);
2843 unlock_page(page);
2844 put_page(page);
2845 }
eef1b3ba
KS
2846}
2847
78ddc534 2848static void split_huge_pmd_address(struct vm_area_struct *vma,
94fcc585
AA
2849 unsigned long address)
2850{
f72e7dcd
HD
2851 pgd_t *pgd;
2852 pud_t *pud;
94fcc585
AA
2853 pmd_t *pmd;
2854
2855 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2856
78ddc534 2857 pgd = pgd_offset(vma->vm_mm, address);
f72e7dcd
HD
2858 if (!pgd_present(*pgd))
2859 return;
2860
2861 pud = pud_offset(pgd, address);
2862 if (!pud_present(*pud))
2863 return;
2864
2865 pmd = pmd_offset(pud, address);
78ddc534 2866 if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
94fcc585
AA
2867 return;
2868 /*
2869 * Caller holds the mmap_sem write mode, so a huge pmd cannot
2870 * materialize from under us.
2871 */
ad0bed24 2872 split_huge_pmd(vma, pmd, address);
94fcc585
AA
2873}
2874
e1b9996b 2875void vma_adjust_trans_huge(struct vm_area_struct *vma,
94fcc585
AA
2876 unsigned long start,
2877 unsigned long end,
2878 long adjust_next)
2879{
2880 /*
2881 * If the new start address isn't hpage aligned and it could
2882 * previously contain a hugepage: check if we need to split
2883 * a huge pmd.
2884 */
2885 if (start & ~HPAGE_PMD_MASK &&
2886 (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2887 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
78ddc534 2888 split_huge_pmd_address(vma, start);
94fcc585
AA
2889
2890 /*
2891 * If the new end address isn't hpage aligned and it could
2892 * previously contain a hugepage: check if we need to split
2893 * a huge pmd.
2894 */
2895 if (end & ~HPAGE_PMD_MASK &&
2896 (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2897 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
78ddc534 2898 split_huge_pmd_address(vma, end);
94fcc585
AA
2899
2900 /*
2901 * If we're also updating vma->vm_next->vm_start and the new
2902 * vm_next->vm_start isn't page aligned and it could previously
2903 * contain a hugepage: check if we need to split a huge pmd.
2904 */
2905 if (adjust_next > 0) {
2906 struct vm_area_struct *next = vma->vm_next;
2907 unsigned long nstart = next->vm_start;
2908 nstart += adjust_next << PAGE_SHIFT;
2909 if (nstart & ~HPAGE_PMD_MASK &&
2910 (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2911 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
78ddc534 2912 split_huge_pmd_address(next, nstart);
94fcc585
AA
2913 }
2914}
e9b61f19
KS
2915
2916static void freeze_page_vma(struct vm_area_struct *vma, struct page *page,
2917 unsigned long address)
2918{
2919 spinlock_t *ptl;
2920 pgd_t *pgd;
2921 pud_t *pud;
2922 pmd_t *pmd;
2923 pte_t *pte;
2924 int i, nr = HPAGE_PMD_NR;
2925
2926 /* Skip pages which don't belong to the VMA */
2927 if (address < vma->vm_start) {
2928 int off = (vma->vm_start - address) >> PAGE_SHIFT;
2929 page += off;
2930 nr -= off;
2931 address = vma->vm_start;
2932 }
2933
2934 pgd = pgd_offset(vma->vm_mm, address);
2935 if (!pgd_present(*pgd))
2936 return;
2937 pud = pud_offset(pgd, address);
2938 if (!pud_present(*pud))
2939 return;
2940 pmd = pmd_offset(pud, address);
2941 ptl = pmd_lock(vma->vm_mm, pmd);
2942 if (!pmd_present(*pmd)) {
2943 spin_unlock(ptl);
2944 return;
2945 }
2946 if (pmd_trans_huge(*pmd)) {
2947 if (page == pmd_page(*pmd))
2948 __split_huge_pmd_locked(vma, pmd, address, true);
2949 spin_unlock(ptl);
2950 return;
2951 }
2952 spin_unlock(ptl);
2953
2954 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
2955 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++) {
2956 pte_t entry, swp_pte;
2957 swp_entry_t swp_entry;
2958
2959 if (!pte_present(pte[i]))
2960 continue;
2961 if (page_to_pfn(page) != pte_pfn(pte[i]))
2962 continue;
2963 flush_cache_page(vma, address, page_to_pfn(page));
2964 entry = ptep_clear_flush(vma, address, pte + i);
2965 swp_entry = make_migration_entry(page, pte_write(entry));
2966 swp_pte = swp_entry_to_pte(swp_entry);
2967 if (pte_soft_dirty(entry))
2968 swp_pte = pte_swp_mksoft_dirty(swp_pte);
2969 set_pte_at(vma->vm_mm, address, pte + i, swp_pte);
2970 page_remove_rmap(page, false);
2971 put_page(page);
2972 }
2973 pte_unmap_unlock(pte, ptl);
2974}
2975
2976static void freeze_page(struct anon_vma *anon_vma, struct page *page)
2977{
2978 struct anon_vma_chain *avc;
2979 pgoff_t pgoff = page_to_pgoff(page);
2980
2981 VM_BUG_ON_PAGE(!PageHead(page), page);
2982
2983 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff,
2984 pgoff + HPAGE_PMD_NR - 1) {
2985 unsigned long haddr;
2986
2987 haddr = __vma_address(page, avc->vma) & HPAGE_PMD_MASK;
2988 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
2989 haddr, haddr + HPAGE_PMD_SIZE);
2990 freeze_page_vma(avc->vma, page, haddr);
2991 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
2992 haddr, haddr + HPAGE_PMD_SIZE);
2993 }
2994}
2995
2996static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page,
2997 unsigned long address)
2998{
2999 spinlock_t *ptl;
3000 pmd_t *pmd;
3001 pte_t *pte, entry;
3002 swp_entry_t swp_entry;
3003 int i, nr = HPAGE_PMD_NR;
3004
3005 /* Skip pages which don't belong to the VMA */
3006 if (address < vma->vm_start) {
3007 int off = (vma->vm_start - address) >> PAGE_SHIFT;
3008 page += off;
3009 nr -= off;
3010 address = vma->vm_start;
3011 }
3012
3013 pmd = mm_find_pmd(vma->vm_mm, address);
3014 if (!pmd)
3015 return;
3016 pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
3017 for (i = 0; i < nr; i++, address += PAGE_SIZE, page++) {
3018 if (!is_swap_pte(pte[i]))
3019 continue;
3020
3021 swp_entry = pte_to_swp_entry(pte[i]);
3022 if (!is_migration_entry(swp_entry))
3023 continue;
3024 if (migration_entry_to_page(swp_entry) != page)
3025 continue;
3026
3027 get_page(page);
3028 page_add_anon_rmap(page, vma, address, false);
3029
3030 entry = pte_mkold(mk_pte(page, vma->vm_page_prot));
3031 entry = pte_mkdirty(entry);
3032 if (is_write_migration_entry(swp_entry))
3033 entry = maybe_mkwrite(entry, vma);
3034
3035 flush_dcache_page(page);
3036 set_pte_at(vma->vm_mm, address, pte + i, entry);
3037
3038 /* No need to invalidate - it was non-present before */
3039 update_mmu_cache(vma, address, pte + i);
3040 }
3041 pte_unmap_unlock(pte, ptl);
3042}
3043
3044static void unfreeze_page(struct anon_vma *anon_vma, struct page *page)
3045{
3046 struct anon_vma_chain *avc;
3047 pgoff_t pgoff = page_to_pgoff(page);
3048
3049 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
3050 pgoff, pgoff + HPAGE_PMD_NR - 1) {
3051 unsigned long address = __vma_address(page, avc->vma);
3052
3053 mmu_notifier_invalidate_range_start(avc->vma->vm_mm,
3054 address, address + HPAGE_PMD_SIZE);
3055 unfreeze_page_vma(avc->vma, page, address);
3056 mmu_notifier_invalidate_range_end(avc->vma->vm_mm,
3057 address, address + HPAGE_PMD_SIZE);
3058 }
3059}
3060
e9b61f19
KS
3061static int __split_huge_page_tail(struct page *head, int tail,
3062 struct lruvec *lruvec, struct list_head *list)
3063{
3064 int mapcount;
3065 struct page *page_tail = head + tail;
3066
3067 mapcount = atomic_read(&page_tail->_mapcount) + 1;
3068 VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail);
3069
3070 /*
3071 * tail_page->_count is zero and not changing from under us. But
3072 * get_page_unless_zero() may be running from under us on the
3073 * tail_page. If we used atomic_set() below instead of atomic_add(), we
3074 * would then run atomic_set() concurrently with
3075 * get_page_unless_zero(), and atomic_set() is implemented in C not
3076 * using locked ops. spin_unlock on x86 sometimes uses locked ops
3077 * because of PPro errata 66, 92, so unless somebody can guarantee
3078 * atomic_set() here would be safe on all archs (and not only on x86),
3079 * it's safer to use atomic_add().
3080 */
3081 atomic_add(mapcount + 1, &page_tail->_count);
3082
3083
3084 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
3085 page_tail->flags |= (head->flags &
3086 ((1L << PG_referenced) |
3087 (1L << PG_swapbacked) |
3088 (1L << PG_mlocked) |
3089 (1L << PG_uptodate) |
3090 (1L << PG_active) |
3091 (1L << PG_locked) |
3092 (1L << PG_unevictable)));
3093 page_tail->flags |= (1L << PG_dirty);
3094
3095 /*
3096 * After clearing PageTail the gup refcount can be released.
3097 * Page flags also must be visible before we make the page non-compound.
3098 */
3099 smp_wmb();
3100
3101 clear_compound_head(page_tail);
3102
3103 if (page_is_young(head))
3104 set_page_young(page_tail);
3105 if (page_is_idle(head))
3106 set_page_idle(page_tail);
3107
3108 /* ->mapping in first tail page is compound_mapcount */
9a982250 3109 VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
e9b61f19
KS
3110 page_tail);
3111 page_tail->mapping = head->mapping;
3112
3113 page_tail->index = head->index + tail;
3114 page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
3115 lru_add_page_tail(head, page_tail, lruvec, list);
3116
3117 return mapcount;
3118}
3119
3120static void __split_huge_page(struct page *page, struct list_head *list)
3121{
3122 struct page *head = compound_head(page);
3123 struct zone *zone = page_zone(head);
3124 struct lruvec *lruvec;
3125 int i, tail_mapcount;
3126
3127 /* prevent PageLRU to go away from under us, and freeze lru stats */
3128 spin_lock_irq(&zone->lru_lock);
3129 lruvec = mem_cgroup_page_lruvec(head, zone);
3130
3131 /* complete memcg works before add pages to LRU */
3132 mem_cgroup_split_huge_fixup(head);
3133
3134 tail_mapcount = 0;
3135 for (i = HPAGE_PMD_NR - 1; i >= 1; i--)
3136 tail_mapcount += __split_huge_page_tail(head, i, lruvec, list);
3137 atomic_sub(tail_mapcount, &head->_count);
3138
3139 ClearPageCompound(head);
3140 spin_unlock_irq(&zone->lru_lock);
3141
3142 unfreeze_page(page_anon_vma(head), head);
3143
3144 for (i = 0; i < HPAGE_PMD_NR; i++) {
3145 struct page *subpage = head + i;
3146 if (subpage == page)
3147 continue;
3148 unlock_page(subpage);
3149
3150 /*
3151 * Subpages may be freed if there wasn't any mapping left,
3152 * e.g. if add_to_swap() is running on an lru page that
3153 * had its mapping zapped. And freeing these pages
3154 * requires taking the lru_lock so we do the put_page
3155 * of the tail pages after the split is complete.
3156 */
3157 put_page(subpage);
3158 }
3159}
3160
b20ce5e0
KS
3161int total_mapcount(struct page *page)
3162{
3163 int i, ret;
3164
3165 VM_BUG_ON_PAGE(PageTail(page), page);
3166
3167 if (likely(!PageCompound(page)))
3168 return atomic_read(&page->_mapcount) + 1;
3169
3170 ret = compound_mapcount(page);
3171 if (PageHuge(page))
3172 return ret;
3173 for (i = 0; i < HPAGE_PMD_NR; i++)
3174 ret += atomic_read(&page[i]._mapcount) + 1;
3175 if (PageDoubleMap(page))
3176 ret -= HPAGE_PMD_NR;
3177 return ret;
3178}
3179
e9b61f19
KS
3180/*
3181 * This function splits a huge page into normal pages. @page can point to any
3182 * subpage of the huge page to split. The split doesn't change the position of @page.
3183 *
3184 * The caller must hold the only pin on the @page, otherwise the split fails with -EBUSY.
3185 * The huge page must be locked.
3186 *
3187 * If @list is null, tail pages will be added to LRU list, otherwise, to @list.
3188 *
3189 * Both head page and tail pages will inherit mapping, flags, and so on from
3190 * the hugepage.
3191 *
3192 * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
3193 * can be freed if they are not mapped.
3194 *
3195 * Returns 0 if the hugepage is split successfully.
3196 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
3197 * us.
3198 */
3199int split_huge_page_to_list(struct page *page, struct list_head *list)
3200{
3201 struct page *head = compound_head(page);
3202 struct anon_vma *anon_vma;
3203 int count, mapcount, ret;
d9654322 3204 bool mlocked;
e9b61f19
KS
3205
3206 VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
3207 VM_BUG_ON_PAGE(!PageAnon(page), page);
3208 VM_BUG_ON_PAGE(!PageLocked(page), page);
3209 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
3210 VM_BUG_ON_PAGE(!PageCompound(page), page);
3211
3212 /*
3213 * The caller does not necessarily hold an mmap_sem that would prevent
3214 * the anon_vma disappearing so we first take a reference to it
3215 * and then lock the anon_vma for write. This is similar to
3216 * page_lock_anon_vma_read except the write lock is taken to serialise
3217 * against parallel split or collapse operations.
3218 */
3219 anon_vma = page_get_anon_vma(head);
3220 if (!anon_vma) {
3221 ret = -EBUSY;
3222 goto out;
3223 }
3224 anon_vma_lock_write(anon_vma);
3225
3226 /*
3227 * Racy check whether we can split the page, before freeze_page()
3228 * splits the PMDs
3229 */
3230 if (total_mapcount(head) != page_count(head) - 1) {
3231 ret = -EBUSY;
3232 goto out_unlock;
3233 }
3234
d9654322 3235 mlocked = PageMlocked(page);
e9b61f19
KS
3236 freeze_page(anon_vma, head);
3237 VM_BUG_ON_PAGE(compound_mapcount(head), head);
3238
d9654322
KS
3239 /* Make sure the page is not on per-CPU pagevec as it takes pin */
3240 if (mlocked)
3241 lru_add_drain();
3242
9a982250
KS
3243 /* Prevent deferred_split_scan() touching ->_count */
3244 spin_lock(&split_queue_lock);
e9b61f19
KS
3245 count = page_count(head);
3246 mapcount = total_mapcount(head);
3247 if (mapcount == count - 1) {
9a982250
KS
3248 if (!list_empty(page_deferred_list(head))) {
3249 split_queue_len--;
3250 list_del(page_deferred_list(head));
3251 }
3252 spin_unlock(&split_queue_lock);
e9b61f19
KS
3253 __split_huge_page(page, list);
3254 ret = 0;
3255 } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount > count - 1) {
9a982250 3256 spin_unlock(&split_queue_lock);
e9b61f19
KS
3257 pr_alert("total_mapcount: %u, page_count(): %u\n",
3258 mapcount, count);
3259 if (PageTail(page))
3260 dump_page(head, NULL);
3261 dump_page(page, "total_mapcount(head) > page_count(head) - 1");
3262 BUG();
3263 } else {
9a982250 3264 spin_unlock(&split_queue_lock);
e9b61f19
KS
3265 unfreeze_page(anon_vma, head);
3266 ret = -EBUSY;
3267 }
3268
3269out_unlock:
3270 anon_vma_unlock_write(anon_vma);
3271 put_anon_vma(anon_vma);
3272out:
3273 count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
3274 return ret;
3275}
9a982250
KS
3276
3277void free_transhuge_page(struct page *page)
3278{
3279 unsigned long flags;
3280
3281 spin_lock_irqsave(&split_queue_lock, flags);
3282 if (!list_empty(page_deferred_list(page))) {
3283 split_queue_len--;
3284 list_del(page_deferred_list(page));
3285 }
3286 spin_unlock_irqrestore(&split_queue_lock, flags);
3287 free_compound_page(page);
3288}
3289
3290void deferred_split_huge_page(struct page *page)
3291{
3292 unsigned long flags;
3293
3294 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
3295
3296 spin_lock_irqsave(&split_queue_lock, flags);
3297 if (list_empty(page_deferred_list(page))) {
3298 list_add_tail(page_deferred_list(page), &split_queue);
3299 split_queue_len++;
3300 }
3301 spin_unlock_irqrestore(&split_queue_lock, flags);
3302}
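/*
 * Editorial note (assumption, not from the original file): the expected
 * caller of deferred_split_huge_page() is the rmap code, once a THP becomes
 * partially unmapped; the page then sits on split_queue until the shrinker
 * below splits it under memory pressure, so the unused subpages are
 * reclaimed lazily instead of being split at unmap time.
 */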
3303
3304static unsigned long deferred_split_count(struct shrinker *shrink,
3305 struct shrink_control *sc)
3306{
3307 /*
3308 * Splitting a page from the split_queue will free up at least one page,
3309 * at most HPAGE_PMD_NR - 1. We don't track the exact number.
3310 * Let's use HPAGE_PMD_NR / 2 as a ballpark.
3311 */
3312 return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
3313}
3314
3315static unsigned long deferred_split_scan(struct shrinker *shrink,
3316 struct shrink_control *sc)
3317{
3318 unsigned long flags;
3319 LIST_HEAD(list), *pos, *next;
3320 struct page *page;
3321 int split = 0;
3322
3323 spin_lock_irqsave(&split_queue_lock, flags);
3324 list_splice_init(&split_queue, &list);
3325
3326 /* Take pin on all head pages to avoid freeing them under us */
3327 list_for_each_safe(pos, next, &list) {
3328 page = list_entry((void *)pos, struct page, mapping);
3329 page = compound_head(page);
3330 /* race with put_compound_page() */
3331 if (!get_page_unless_zero(page)) {
3332 list_del_init(page_deferred_list(page));
3333 split_queue_len--;
3334 }
3335 }
3336 spin_unlock_irqrestore(&split_queue_lock, flags);
3337
3338 list_for_each_safe(pos, next, &list) {
3339 page = list_entry((void *)pos, struct page, mapping);
3340 lock_page(page);
3341 /* split_huge_page() removes page from list on success */
3342 if (!split_huge_page(page))
3343 split++;
3344 unlock_page(page);
3345 put_page(page);
3346 }
3347
3348 spin_lock_irqsave(&split_queue_lock, flags);
3349 list_splice_tail(&list, &split_queue);
3350 spin_unlock_irqrestore(&split_queue_lock, flags);
3351
3352 return split * HPAGE_PMD_NR / 2;
3353}
3354
3355static struct shrinker deferred_split_shrinker = {
3356 .count_objects = deferred_split_count,
3357 .scan_objects = deferred_split_scan,
3358 .seeks = DEFAULT_SEEKS,
3359};
49071d43
KS
3360
3361#ifdef CONFIG_DEBUG_FS
3362static int split_huge_pages_set(void *data, u64 val)
3363{
3364 struct zone *zone;
3365 struct page *page;
3366 unsigned long pfn, max_zone_pfn;
3367 unsigned long total = 0, split = 0;
3368
3369 if (val != 1)
3370 return -EINVAL;
3371
3372 for_each_populated_zone(zone) {
3373 max_zone_pfn = zone_end_pfn(zone);
3374 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
3375 if (!pfn_valid(pfn))
3376 continue;
3377
3378 page = pfn_to_page(pfn);
3379 if (!get_page_unless_zero(page))
3380 continue;
3381
3382 if (zone != page_zone(page))
3383 goto next;
3384
3385 if (!PageHead(page) || !PageAnon(page) ||
3386 PageHuge(page))
3387 goto next;
3388
3389 total++;
3390 lock_page(page);
3391 if (!split_huge_page(page))
3392 split++;
3393 unlock_page(page);
3394next:
3395 put_page(page);
3396 }
3397 }
3398
3399 pr_info("%lu of %lu THP split\n", split, total);
3400
3401 return 0;
3402}
3403DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set,
3404 "%llu\n");
3405
3406static int __init split_huge_pages_debugfs(void)
3407{
3408 void *ret;
3409
3410 ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
3411 &split_huge_pages_fops);
3412 if (!ret)
3413 pr_warn("Failed to create split_huge_pages in debugfs\n");
3414 return 0;
3415}
3416late_initcall(split_huge_pages_debugfs);
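/*
 * Editorial usage note (not part of the original file): with debugfs
 * mounted in the usual place, writing 1 to the file created above walks
 * every populated zone and tries to split all anonymous THPs, e.g.
 * "echo 1 > /sys/kernel/debug/split_huge_pages"; the resulting
 * "%lu of %lu THP split" summary lands in the kernel log.
 */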
3417#endif