// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>

#include <asm/pgalloc.h>

#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "hugetlb_vmemmap.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
				1 << order);
}
#else
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;
__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
					long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}
/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}
static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}
static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;
#else
	return true;
#endif
}
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}
/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out to add
 * the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * cases this will be same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
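/*
 * Illustrative note (not part of the original source): struct resv_map is
 * allocated with kmalloc() and therefore at least word aligned, so the two
 * low bits of vm_private_data are free to carry the flags above.  For
 * example, a reserve map at 0xffff888012345600 owned by the mapping task
 * is stored as 0xffff888012345601; masking with ~HPAGE_RESV_MASK recovers
 * the pointer and masking with HPAGE_RESV_MASK recovers the flags (see
 * vma_resv_map() and set_vma_resv_flags() below).
 */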
/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned which this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}
static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}
struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}
*ref
)
940 struct resv_map
*resv_map
= container_of(ref
, struct resv_map
, refs
);
941 struct list_head
*head
= &resv_map
->region_cache
;
942 struct file_region
*rg
, *trg
;
944 /* Clear out any active regions before we release the map. */
945 region_del(resv_map
, 0, LONG_MAX
);
947 /* ... and any entries left in the cache */
948 list_for_each_entry_safe(rg
, trg
, head
, link
) {
953 VM_BUG_ON(resv_map
->adds_in_progress
);
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}
/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_sem writer semaphore held.
 * This function should be only used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	reset_vma_resv_huge_pages(vma);
}
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by other process(chg == 0),
		 * so, we should decrement reserved count. Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_PAGE(page_count(page), page);

	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}
static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_longterm_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}
*dequeue_huge_page_nodemask(struct hstate
*h
, gfp_t gfp_mask
, int nid
,
1152 unsigned int cpuset_mems_cookie
;
1153 struct zonelist
*zonelist
;
1156 int node
= NUMA_NO_NODE
;
1158 zonelist
= node_zonelist(nid
, gfp_mask
);
1161 cpuset_mems_cookie
= read_mems_allowed_begin();
1162 for_each_zone_zonelist_nodemask(zone
, z
, zonelist
, gfp_zone(gfp_mask
), nmask
) {
1165 if (!cpuset_zone_allowed(zone
, gfp_mask
))
1168 * no need to ask again on the same node. Pool is node rather than
1171 if (zone_to_nid(zone
) == node
)
1173 node
= zone_to_nid(zone
);
1175 page
= dequeue_huge_page_node_exact(h
, node
);
1179 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie
)))
static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}
#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
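/*
 * Illustrative usage (not part of the original source): callers walk the
 * allowed nodes round-robin, e.g.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page(h, gfp_mask, node,
 *					     nodes_allowed, NULL);
 *		if (page)
 *			break;
 *	}
 *
 * Each call to hstate_next_node_to_alloc() advances h->next_nid_to_alloc,
 * so successive pool allocations stay balanced across nodes (this is the
 * pattern used by alloc_pool_huge_page() below).
 */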
/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_page(struct page *page,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
}

static void destroy_compound_hugetlb_page_for_demote(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, true);
}
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, false);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}
#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}
#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif
/*
 * Remove hugetlb page from lists, and update dtor so that page appears
 * as just a compound page.
 *
 * A reference is held on the page, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus,
							bool demote)
{
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&page->lru);

	if (HPageFreed(page)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_page will turn the compound page
	 * into a simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when and
	 * after update_and_free_page is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		set_page_refcounted(page);
	if (hstate_is_gigantic(h))
		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	else
		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, false);
}

static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, true);
}
static void add_hugetlb_page(struct hstate *h, struct page *page,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&page->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	set_page_private(page, 0);
	SetHPageVmemmapOptimized(page);

	/*
	 * This page is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = put_page_testzero(page);
	if (!zeroed)
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(page);
	enqueue_huge_page(h, page);
}
static void __update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (HPageRawHwpUnreliable(page))
		return;

	if (hugetlb_vmemmap_restore(h, page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_page(h, page, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(PageHWPoison(page)))
		hugetlb_clear_page_hwpoison(page);

	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_page.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_page(page, huge_page_order(h))) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}
/*
 * update_and_free_page() can be called under any context, so we cannot
 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_page() will set_compound_page_dtor(page,
		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_page(h, page);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_page(struct hstate *h, struct page *page,
				 bool atomic)
{
	if (!HPageVmemmapOptimized(page) || !atomic) {
		__update_and_free_page(h, page);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;

	list_for_each_entry_safe(page, t_page, list, lru) {
		update_and_free_page(h, page, false);
		cond_resched();
	}
}
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}
*page
)
1676 * Can't pass hstate in here because it is called from the
1677 * compound page destructor.
1679 struct hstate
*h
= page_hstate(page
);
1680 int nid
= page_to_nid(page
);
1681 struct hugepage_subpool
*spool
= hugetlb_page_subpool(page
);
1682 bool restore_reserve
;
1683 unsigned long flags
;
1685 VM_BUG_ON_PAGE(page_count(page
), page
);
1686 VM_BUG_ON_PAGE(page_mapcount(page
), page
);
1688 hugetlb_set_page_subpool(page
, NULL
);
1690 __ClearPageAnonExclusive(page
);
1691 page
->mapping
= NULL
;
1692 restore_reserve
= HPageRestoreReserve(page
);
1693 ClearHPageRestoreReserve(page
);
1696 * If HPageRestoreReserve was set on page, page allocation consumed a
1697 * reservation. If the page was associated with a subpool, there
1698 * would have been a page reserved in the subpool before allocation
1699 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1700 * reservation, do not call hugepage_subpool_put_pages() as this will
1701 * remove the reserved page from the subpool.
1703 if (!restore_reserve
) {
1705 * A return code of zero implies that the subpool will be
1706 * under its minimum size if the reservation is not restored
1707 * after page is free. Therefore, force restore_reserve
1710 if (hugepage_subpool_put_pages(spool
, 1) == 0)
1711 restore_reserve
= true;
1714 spin_lock_irqsave(&hugetlb_lock
, flags
);
1715 ClearHPageMigratable(page
);
1716 hugetlb_cgroup_uncharge_page(hstate_index(h
),
1717 pages_per_huge_page(h
), page
);
1718 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h
),
1719 pages_per_huge_page(h
), page
);
1720 if (restore_reserve
)
1721 h
->resv_huge_pages
++;
1723 if (HPageTemporary(page
)) {
1724 remove_hugetlb_page(h
, page
, false);
1725 spin_unlock_irqrestore(&hugetlb_lock
, flags
);
1726 update_and_free_page(h
, page
, true);
1727 } else if (h
->surplus_huge_pages_node
[nid
]) {
1728 /* remove the page from active list */
1729 remove_hugetlb_page(h
, page
, true);
1730 spin_unlock_irqrestore(&hugetlb_lock
, flags
);
1731 update_and_free_page(h
, page
, true);
1733 arch_clear_hugepage_flags(page
);
1734 enqueue_huge_page(h
, page
);
1735 spin_unlock_irqrestore(&hugetlb_lock
, flags
);
/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

static void __prep_new_huge_page(struct hstate *h, struct page *page)
{
	hugetlb_vmemmap_optimize(h, page);
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	hugetlb_set_page_subpool(page, NULL);
	set_hugetlb_cgroup(page, NULL);
	set_hugetlb_cgroup_rsvd(page, NULL);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	__prep_new_huge_page(h, page);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}
static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
								bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
		 */
		__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted.  We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero.  Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1.  If count
		 * is not 1, we return an error.  An error return indicates
		 * the set of pages can not be converted to a gigantic page.
		 * The caller who allocated the pages should then discard the
		 * pages using the appropriate free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
	return true;

out_error:
	/* undo tail page modifications made above */
	p = page + 1;
	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
		__ClearPageReserved(p);
	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
	return false;
}

static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, false);
}

static bool prep_compound_gigantic_page_for_demote(struct page *page,
							unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, true);
}
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHeadHuge);
/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write. If we can
 * not get the lock, simply return NULL to caller.
 */
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
{
	struct address_space *mapping = page_mapping(hpage);

	if (!mapping)
		return mapping;

	if (i_mmap_trylock_write(mapping))
		return mapping;

	return NULL;
}
pgoff_t hugetlb_basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct page *page;
	bool alloc_try_hard = true;

	/*
	 * By default we always try hard to allocate the page with
	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
	 * a loop (to adjust global huge page counts) and previous allocation
	 * failed, do not continue to try hard on the same node.  Use the
	 * node_alloc_noretry bitmap to manage this state information.
	 */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;
	gfp_mask |= __GFP_COMP|__GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change.  Clear bit so that we resume
	 * normal 'try hard' allocations.
	 */
	if (node_alloc_noretry && page && !alloc_try_hard)
		node_clear(nid, *node_alloc_noretry);

	/*
	 * If we tried hard to get a page but failed, set bit so that
	 * subsequent attempts will not try as hard until there is an
	 * overall state change.
	 */
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	return page;
}
/*
 * Common helper to allocate a fresh hugetlb page. All specific allocators
 * should use this function to get new hugetlb pages
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;
	bool retry = false;

retry:
	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h)) {
		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
			/*
			 * Rare failure to convert pages to compound page.
			 * Free pages and try again - ONCE!
			 */
			free_gigantic_page(page, huge_page_order(h));
			if (!retry) {
				retry = true;
				goto retry;
			}
			return NULL;
		}
	}
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}
/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
						node_alloc_noretry);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}
/*
 * Remove huge page from pool from next node to free.  Attempt to keep
 * persistent huge pages more or less balanced over allowed nodes.
 * This routine only 'removes' the hugetlb page.  The caller must make
 * an additional call to free the page to low level allocators.
 * Called with hugetlb_lock locked.
 */
static struct page *remove_pool_huge_page(struct hstate *h,
						nodemask_t *nodes_allowed,
						 bool acct_surplus)
{
	int nr_nodes, node;
	struct page *page = NULL;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			page = list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			remove_hugetlb_page(h, page, acct_surplus);
			break;
		}
	}

	return page;
}
/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
 *           when the system is under memory pressure and the feature of
 *           freeing unused vmemmap pages associated with each hugetlb page
 *           is enabled.
 *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
 *           (allocated or reserved.)
 *       0:  successfully dissolved free hugepages or the page is not a
 *           hugepage (considered as already dissolved)
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!PageHuge(page))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!PageHuge(page)) {
		rc = 0;
		goto out;
	}

	if (!page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);

		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!HPageFreed(head))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race.  In fact, we have a chance to
			 * successfully dissolve the page if we do a retry,
			 * because the race window is quite small.  Seizing
			 * this opportunity increases the success rate of
			 * dissolving the page.
			 */
			goto retry;
		}

		remove_hugetlb_page(h, head, false);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_page will allocate required vmemmap
		 * before freeing the page.  update_and_free_page will fail to
		 * free the page if it can not allocate required vmemmap.  We
		 * need to adjust max_huge_pages if the page is not freed.
		 * Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 */
		rc = hugetlb_vmemmap_restore(h, head);
		if (!rc) {
			update_and_free_page(h, head, false);
		} else {
			spin_lock_irq(&hugetlb_lock);
			add_hugetlb_page(h, head, false);
			h->max_huge_pages++;
			spin_unlock_irq(&hugetlb_lock);
		}

		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}
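/*
 * Memory offlining is the main user: dissolve_free_huge_pages() below
 * walks a pfn range and calls this for each candidate page, bailing out
 * on the first error.
 */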
/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;
	unsigned int order;
	struct hstate *h;

	if (!hugepages_supported())
		return rc;

	order = huge_page_order(&default_hstate);
	for_each_hstate(h)
		order = min(order, huge_page_order(h));

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nmask, bool zero_ref)
{
	struct page *page = NULL;
	bool retry = false;

	if (hstate_is_gigantic(h))
		return NULL;

retry:
	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommiting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_page
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetHPageTemporary(page);
		spin_unlock_irq(&hugetlb_lock);
		put_page(page);
		return NULL;
	}

	if (zero_ref) {
		/*
		 * Caller requires a page with zero ref count.
		 * We will drop ref count here.  If someone else is holding
		 * a ref, the page will be freed when they drop it.  Abuse
		 * temporary page flag to accomplish this.
		 */
		SetHPageTemporary(page);
		if (!put_page_testzero(page)) {
			/*
			 * Unexpected inflated ref count on freshly allocated
			 * huge page.  Retry once.
			 */
			pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
			spin_unlock_irq(&hugetlb_lock);
			if (retry)
				return NULL;

			retry = true;
			goto retry;
		}
		ClearHPageTemporary(page);
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[page_to_nid(page)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return page;
}
static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	SetHPageTemporary(page);

	return page;
}
/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	if (mpol_is_preferred_many(mpol)) {
		gfp_t gfp = gfp_mask | __GFP_NOWARN;

		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
		page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
	mpol_cond_put(mpol);
	return page;
}
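/*
 * For MPOL_PREFERRED_MANY the first attempt above is intentionally
 * lightweight: __GFP_DIRECT_RECLAIM is cleared so the preferred nodes are
 * only tried opportunistically, and the second call then retries with the
 * full gfp mask and no nodemask restriction.
 */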
/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask)
{
	spin_lock_irq(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock_irq(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock_irq(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}
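/*
 * This is the hugetlb allocation path used by page migration: a free pool
 * page is preferred, and only if none is available after honouring
 * reservations is a temporary page taken from the buddy allocator.
 */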
/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
	mpol_cond_put(mpol);

	return page;
}
2333 * Increase the hugetlb pool such that it can accommodate a reservation
2336 static int gather_surplus_pages(struct hstate
*h
, long delta
)
2337 __must_hold(&hugetlb_lock
)
2339 struct list_head surplus_list
;
2340 struct page
*page
, *tmp
;
2343 long needed
, allocated
;
2344 bool alloc_ok
= true;
2346 lockdep_assert_held(&hugetlb_lock
);
2347 needed
= (h
->resv_huge_pages
+ delta
) - h
->free_huge_pages
;
2349 h
->resv_huge_pages
+= delta
;
2354 INIT_LIST_HEAD(&surplus_list
);
2358 spin_unlock_irq(&hugetlb_lock
);
2359 for (i
= 0; i
< needed
; i
++) {
2360 page
= alloc_surplus_huge_page(h
, htlb_alloc_mask(h
),
2361 NUMA_NO_NODE
, NULL
, true);
2366 list_add(&page
->lru
, &surplus_list
);
2372 * After retaking hugetlb_lock, we need to recalculate 'needed'
2373 * because either resv_huge_pages or free_huge_pages may have changed.
2375 spin_lock_irq(&hugetlb_lock
);
2376 needed
= (h
->resv_huge_pages
+ delta
) -
2377 (h
->free_huge_pages
+ allocated
);
2382 * We were not able to allocate enough pages to
2383 * satisfy the entire reservation so we free what
2384 * we've allocated so far.
2389 * The surplus_list now contains _at_least_ the number of extra pages
2390 * needed to accommodate the reservation. Add the appropriate number
2391 * of pages to the hugetlb pool and free the extras back to the buddy
2392 * allocator. Commit the entire reservation here to prevent another
2393 * process from stealing the pages as they are added to the pool but
2394 * before they are reserved.
2396 needed
+= allocated
;
2397 h
->resv_huge_pages
+= delta
;
2400 /* Free the needed pages to the hugetlb pool */
2401 list_for_each_entry_safe(page
, tmp
, &surplus_list
, lru
) {
2404 /* Add the page to the hugetlb allocator */
2405 enqueue_huge_page(h
, page
);
2408 spin_unlock_irq(&hugetlb_lock
);
2411 * Free unnecessary surplus pages to the buddy allocator.
2412 * Pages have no ref count, call free_huge_page directly.
2414 list_for_each_entry_safe(page
, tmp
, &surplus_list
, lru
)
2415 free_huge_page(page
);
2416 spin_lock_irq(&hugetlb_lock
);
2422 * This routine has two main purposes:
2423 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2424 * in unused_resv_pages. This corresponds to the prior adjustments made
2425 * to the associated reservation map.
2426 * 2) Free any unused surplus pages that may have been allocated to satisfy
2427 * the reservation. As many as unused_resv_pages may be freed.
2429 static void return_unused_surplus_pages(struct hstate
*h
,
2430 unsigned long unused_resv_pages
)
2432 unsigned long nr_pages
;
2434 LIST_HEAD(page_list
);
2436 lockdep_assert_held(&hugetlb_lock
);
2437 /* Uncommit the reservation */
2438 h
->resv_huge_pages
-= unused_resv_pages
;
2440 if (hstate_is_gigantic(h
) && !gigantic_page_runtime_supported())
2444 * Part (or even all) of the reservation could have been backed
2445 * by pre-allocated pages. Only free surplus pages.
2447 nr_pages
= min(unused_resv_pages
, h
->surplus_huge_pages
);
2450 * We want to release as many surplus pages as possible, spread
2451 * evenly across all nodes with memory. Iterate across these nodes
2452 * until we can no longer free unreserved surplus pages. This occurs
2453 * when the nodes with surplus pages have no free pages.
2454 * remove_pool_huge_page() will balance the freed pages across the
2455 * on-line nodes with memory and will handle the hstate accounting.
2457 while (nr_pages
--) {
2458 page
= remove_pool_huge_page(h
, &node_states
[N_MEMORY
], 1);
2462 list_add(&page
->lru
, &page_list
);
2466 spin_unlock_irq(&hugetlb_lock
);
2467 update_and_free_pages_bulk(h
, &page_list
);
2468 spin_lock_irq(&hugetlb_lock
);
2473 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2474 * are used by the huge page allocation routines to manage reservations.
2476 * vma_needs_reservation is called to determine if the huge page at addr
2477 * within the vma has an associated reservation. If a reservation is
2478 * needed, the value 1 is returned. The caller is then responsible for
2479 * managing the global reservation and subpool usage counts. After
2480 * the huge page has been allocated, vma_commit_reservation is called
2481 * to add the page to the reservation map. If the page allocation fails,
2482 * the reservation must be ended instead of committed. vma_end_reservation
2483 * is called in such cases.
2485 * In the normal case, vma_commit_reservation returns the same value
2486 * as the preceding vma_needs_reservation call. The only time this
2487 * is not the case is if a reserve map was changed between calls. It
2488 * is the responsibility of the caller to notice the difference and
2489 * take appropriate action.
2491 * vma_add_reservation is used in error paths where a reservation must
2492 * be restored when a newly allocated huge page must be freed. It is
2493 * to be called after calling vma_needs_reservation to determine if a
2494 * reservation exists.
2496 * vma_del_reservation is used in error paths where an entry in the reserve
2497 * map was created during huge page allocation and must be removed. It is to
2498 * be called after calling vma_needs_reservation to determine if a reservation
2501 enum vma_resv_mode
{
2508 static long __vma_reservation_common(struct hstate
*h
,
2509 struct vm_area_struct
*vma
, unsigned long addr
,
2510 enum vma_resv_mode mode
)
2512 struct resv_map
*resv
;
2515 long dummy_out_regions_needed
;
2517 resv
= vma_resv_map(vma
);
2521 idx
= vma_hugecache_offset(h
, vma
, addr
);
2523 case VMA_NEEDS_RESV
:
2524 ret
= region_chg(resv
, idx
, idx
+ 1, &dummy_out_regions_needed
);
2525 /* We assume that vma_reservation_* routines always operate on
2526 * 1 page, and that adding to resv map a 1 page entry can only
2527 * ever require 1 region.
2529 VM_BUG_ON(dummy_out_regions_needed
!= 1);
2531 case VMA_COMMIT_RESV
:
2532 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2533 /* region_add calls of range 1 should never fail. */
2537 region_abort(resv
, idx
, idx
+ 1, 1);
2541 if (vma
->vm_flags
& VM_MAYSHARE
) {
2542 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2543 /* region_add calls of range 1 should never fail. */
2546 region_abort(resv
, idx
, idx
+ 1, 1);
2547 ret
= region_del(resv
, idx
, idx
+ 1);
2551 if (vma
->vm_flags
& VM_MAYSHARE
) {
2552 region_abort(resv
, idx
, idx
+ 1, 1);
2553 ret
= region_del(resv
, idx
, idx
+ 1);
2555 ret
= region_add(resv
, idx
, idx
+ 1, 1, NULL
, NULL
);
2556 /* region_add calls of range 1 should never fail. */
2564 if (vma
->vm_flags
& VM_MAYSHARE
|| mode
== VMA_DEL_RESV
)
2567 * We know private mapping must have HPAGE_RESV_OWNER set.
2569 * In most cases, reserves always exist for private mappings.
2570 * However, a file associated with mapping could have been
2571 * hole punched or truncated after reserves were consumed.
2572 * As subsequent fault on such a range will not use reserves.
2573 * Subtle - The reserve map for private mappings has the
2574 * opposite meaning than that of shared mappings. If NO
2575 * entry is in the reserve map, it means a reservation exists.
2576 * If an entry exists in the reserve map, it means the
2577 * reservation has already been consumed. As a result, the
2578 * return value of this routine is the opposite of the
2579 * value returned from reserve map manipulation routines above.
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

static long vma_del_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
}
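/*
 * A sketch of the common call sequence (as used by alloc_huge_page()
 * further below):
 *
 *	if (vma_needs_reservation(h, vma, addr) < 0)
 *		return ERR_PTR(-ENOMEM);
 *	...allocate the page...
 *	if (success)
 *		vma_commit_reservation(h, vma, addr);
 *	else
 *		vma_end_reservation(h, vma, addr);
 *
 * vma_add_reservation() and vma_del_reservation() are only used on error
 * paths; see restore_reserve_on_error() below.
 */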
2619 * This routine is called to restore reservation information on error paths.
2620 * It should ONLY be called for pages allocated via alloc_huge_page(), and
2621 * the hugetlb mutex should remain held when calling this routine.
2623 * It handles two specific cases:
2624 * 1) A reservation was in place and the page consumed the reservation.
2625 * HPageRestoreReserve is set in the page.
2626 * 2) No reservation was in place for the page, so HPageRestoreReserve is
2627 * not set. However, alloc_huge_page always updates the reserve map.
2629 * In case 1, free_huge_page later in the error path will increment the
2630 * global reserve count. But, free_huge_page does not have enough context
2631 * to adjust the reservation map. This case deals primarily with private
2632 * mappings. Adjust the reserve map here to be consistent with global
2633 * reserve count adjustments to be made by free_huge_page. Make sure the
2634 * reserve map indicates there is a reservation present.
2636 * In case 2, simply undo reserve map modifications done by alloc_huge_page.
2638 void restore_reserve_on_error(struct hstate
*h
, struct vm_area_struct
*vma
,
2639 unsigned long address
, struct page
*page
)
2641 long rc
= vma_needs_reservation(h
, vma
, address
);
2643 if (HPageRestoreReserve(page
)) {
2644 if (unlikely(rc
< 0))
2646 * Rare out of memory condition in reserve map
2647 * manipulation. Clear HPageRestoreReserve so that
2648 * global reserve count will not be incremented
2649 * by free_huge_page. This will make it appear
2650 * as though the reservation for this page was
2651 * consumed. This may prevent the task from
2652 * faulting in the page at a later time. This
2653 * is better than inconsistent global huge page
2654 * accounting of reserve counts.
2656 ClearHPageRestoreReserve(page
);
2658 (void)vma_add_reservation(h
, vma
, address
);
2660 vma_end_reservation(h
, vma
, address
);
2664 * This indicates there is an entry in the reserve map
2665 * not added by alloc_huge_page. We know it was added
2666 * before the alloc_huge_page call, otherwise
2667 * HPageRestoreReserve would be set on the page.
2668 * Remove the entry so that a subsequent allocation
2669 * does not consume a reservation.
2671 rc
= vma_del_reservation(h
, vma
, address
);
2674 * VERY rare out of memory condition. Since
2675 * we can not delete the entry, set
2676 * HPageRestoreReserve so that the reserve
2677 * count will be incremented when the page
2678 * is freed. This reserve will be consumed
2679 * on a subsequent allocation.
2681 SetHPageRestoreReserve(page
);
2682 } else if (rc
< 0) {
2684 * Rare out of memory condition from
2685 * vma_needs_reservation call. Memory allocation is
2686 * only attempted if a new entry is needed. Therefore,
2687 * this implies there is not an entry in the
2690 * For shared mappings, no entry in the map indicates
2691 * no reservation. We are done.
2693 if (!(vma
->vm_flags
& VM_MAYSHARE
))
2695 * For private mappings, no entry indicates
2696 * a reservation is present. Since we can
2697 * not add an entry, set SetHPageRestoreReserve
2698 * on the page so reserve count will be
2699 * incremented when freed. This reserve will
2700 * be consumed on a subsequent allocation.
2702 SetHPageRestoreReserve(page
);
2705 * No reservation present, do nothing
2707 vma_end_reservation(h
, vma
, address
);
2712 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
2713 * @h: struct hstate old page belongs to
2714 * @old_page: Old page to dissolve
2715 * @list: List to isolate the page in case we need to
2716 * Returns 0 on success, otherwise negated error.
2718 static int alloc_and_dissolve_huge_page(struct hstate
*h
, struct page
*old_page
,
2719 struct list_head
*list
)
2721 gfp_t gfp_mask
= htlb_alloc_mask(h
) | __GFP_THISNODE
;
2722 int nid
= page_to_nid(old_page
);
2723 bool alloc_retry
= false;
2724 struct page
*new_page
;
2728 * Before dissolving the page, we need to allocate a new one for the
2729 * pool to remain stable. Here, we allocate the page and 'prep' it
2730 * by doing everything but actually updating counters and adding to
2731 * the pool. This simplifies and let us do most of the processing
2735 new_page
= alloc_buddy_huge_page(h
, gfp_mask
, nid
, NULL
, NULL
);
2739 * If all goes well, this page will be directly added to the free
2740 * list in the pool. For this the ref count needs to be zero.
2741 * Attempt to drop now, and retry once if needed. It is VERY
2742 * unlikely there is another ref on the page.
2744 * If someone else has a reference to the page, it will be freed
2745 * when they drop their ref. Abuse temporary page flag to accomplish
2746 * this. Retry once if there is an inflated ref count.
2748 SetHPageTemporary(new_page
);
2749 if (!put_page_testzero(new_page
)) {
2756 ClearHPageTemporary(new_page
);
2758 __prep_new_huge_page(h
, new_page
);
2761 spin_lock_irq(&hugetlb_lock
);
2762 if (!PageHuge(old_page
)) {
2764 * Freed from under us. Drop new_page too.
2767 } else if (page_count(old_page
)) {
2769 * Someone has grabbed the page, try to isolate it here.
2770 * Fail with -EBUSY if not possible.
2772 spin_unlock_irq(&hugetlb_lock
);
2773 ret
= isolate_hugetlb(old_page
, list
);
2774 spin_lock_irq(&hugetlb_lock
);
2776 } else if (!HPageFreed(old_page
)) {
2778 * Page's refcount is 0 but it has not been enqueued in the
2779 * freelist yet. Race window is small, so we can succeed here if
2782 spin_unlock_irq(&hugetlb_lock
);
2787 * Ok, old_page is still a genuine free hugepage. Remove it from
2788 * the freelist and decrease the counters. These will be
2789 * incremented again when calling __prep_account_new_huge_page()
2790 * and enqueue_huge_page() for new_page. The counters will remain
2791 * stable since this happens under the lock.
2793 remove_hugetlb_page(h
, old_page
, false);
2796 * Ref count on new page is already zero as it was dropped
2797 * earlier. It can be directly added to the pool free list.
2799 __prep_account_new_huge_page(h
, nid
);
2800 enqueue_huge_page(h
, new_page
);
2803 * Pages have been replaced, we can safely free the old one.
2805 spin_unlock_irq(&hugetlb_lock
);
2806 update_and_free_page(h
, old_page
, false);
2812 spin_unlock_irq(&hugetlb_lock
);
2813 /* Page has a zero ref count, but needs a ref to be freed */
2814 set_page_refcounted(new_page
);
2815 update_and_free_page(h
, new_page
, false);
2820 int isolate_or_dissolve_huge_page(struct page
*page
, struct list_head
*list
)
2827 * The page might have been dissolved from under our feet, so make sure
2828 * to carefully check the state under the lock.
2829 * Return success when racing as if we dissolved the page ourselves.
2831 spin_lock_irq(&hugetlb_lock
);
2832 if (PageHuge(page
)) {
2833 head
= compound_head(page
);
2834 h
= page_hstate(head
);
2836 spin_unlock_irq(&hugetlb_lock
);
2839 spin_unlock_irq(&hugetlb_lock
);
2842 * Fence off gigantic pages as there is a cyclic dependency between
2843 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2844 * of bailing out right away without further retrying.
2846 if (hstate_is_gigantic(h
))
2849 if (page_count(head
) && !isolate_hugetlb(head
, list
))
2851 else if (!page_count(head
))
2852 ret
= alloc_and_dissolve_huge_page(h
, head
, list
);
2857 struct page
*alloc_huge_page(struct vm_area_struct
*vma
,
2858 unsigned long addr
, int avoid_reserve
)
2860 struct hugepage_subpool
*spool
= subpool_vma(vma
);
2861 struct hstate
*h
= hstate_vma(vma
);
2863 long map_chg
, map_commit
;
2866 struct hugetlb_cgroup
*h_cg
;
2867 bool deferred_reserve
;
2869 idx
= hstate_index(h
);
2871 * Examine the region/reserve map to determine if the process
2872 * has a reservation for the page to be allocated. A return
2873 * code of zero indicates a reservation exists (no change).
2875 map_chg
= gbl_chg
= vma_needs_reservation(h
, vma
, addr
);
2877 return ERR_PTR(-ENOMEM
);
2880 * Processes that did not create the mapping will have no
2881 * reserves as indicated by the region/reserve map. Check
2882 * that the allocation will not exceed the subpool limit.
2883 * Allocations for MAP_NORESERVE mappings also need to be
2884 * checked against any subpool limit.
2886 if (map_chg
|| avoid_reserve
) {
2887 gbl_chg
= hugepage_subpool_get_pages(spool
, 1);
2889 vma_end_reservation(h
, vma
, addr
);
2890 return ERR_PTR(-ENOSPC
);
2894 * Even though there was no reservation in the region/reserve
2895 * map, there could be reservations associated with the
2896 * subpool that can be used. This would be indicated if the
2897 * return value of hugepage_subpool_get_pages() is zero.
2898 * However, if avoid_reserve is specified we still avoid even
2899 * the subpool reservations.
2905 /* If this allocation is not consuming a reservation, charge it now.
2907 deferred_reserve
= map_chg
|| avoid_reserve
;
2908 if (deferred_reserve
) {
2909 ret
= hugetlb_cgroup_charge_cgroup_rsvd(
2910 idx
, pages_per_huge_page(h
), &h_cg
);
2912 goto out_subpool_put
;
2915 ret
= hugetlb_cgroup_charge_cgroup(idx
, pages_per_huge_page(h
), &h_cg
);
2917 goto out_uncharge_cgroup_reservation
;
2919 spin_lock_irq(&hugetlb_lock
);
2921 * glb_chg is passed to indicate whether or not a page must be taken
2922 * from the global free pool (global change). gbl_chg == 0 indicates
2923 * a reservation exists for the allocation.
2925 page
= dequeue_huge_page_vma(h
, vma
, addr
, avoid_reserve
, gbl_chg
);
2927 spin_unlock_irq(&hugetlb_lock
);
2928 page
= alloc_buddy_huge_page_with_mpol(h
, vma
, addr
);
2930 goto out_uncharge_cgroup
;
2931 if (!avoid_reserve
&& vma_has_reserves(vma
, gbl_chg
)) {
2932 SetHPageRestoreReserve(page
);
2933 h
->resv_huge_pages
--;
2935 spin_lock_irq(&hugetlb_lock
);
2936 list_add(&page
->lru
, &h
->hugepage_activelist
);
2939 hugetlb_cgroup_commit_charge(idx
, pages_per_huge_page(h
), h_cg
, page
);
2940 /* If allocation is not consuming a reservation, also store the
2941 * hugetlb_cgroup pointer on the page.
2943 if (deferred_reserve
) {
2944 hugetlb_cgroup_commit_charge_rsvd(idx
, pages_per_huge_page(h
),
2948 spin_unlock_irq(&hugetlb_lock
);
2950 hugetlb_set_page_subpool(page
, spool
);
2952 map_commit
= vma_commit_reservation(h
, vma
, addr
);
2953 if (unlikely(map_chg
> map_commit
)) {
2955 * The page was added to the reservation map between
2956 * vma_needs_reservation and vma_commit_reservation.
2957 * This indicates a race with hugetlb_reserve_pages.
2958 * Adjust for the subpool count incremented above AND
2959 * in hugetlb_reserve_pages for the same page. Also,
2960 * the reservation count added in hugetlb_reserve_pages
2961 * no longer applies.
2965 rsv_adjust
= hugepage_subpool_put_pages(spool
, 1);
2966 hugetlb_acct_memory(h
, -rsv_adjust
);
2967 if (deferred_reserve
)
2968 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h
),
2969 pages_per_huge_page(h
), page
);
2973 out_uncharge_cgroup
:
2974 hugetlb_cgroup_uncharge_cgroup(idx
, pages_per_huge_page(h
), h_cg
);
2975 out_uncharge_cgroup_reservation
:
2976 if (deferred_reserve
)
2977 hugetlb_cgroup_uncharge_cgroup_rsvd(idx
, pages_per_huge_page(h
),
2980 if (map_chg
|| avoid_reserve
)
2981 hugepage_subpool_put_pages(spool
, 1);
2982 vma_end_reservation(h
, vma
, addr
);
2983 return ERR_PTR(-ENOSPC
);
2986 int alloc_bootmem_huge_page(struct hstate
*h
, int nid
)
2987 __attribute__ ((weak
, alias("__alloc_bootmem_huge_page")));
2988 int __alloc_bootmem_huge_page(struct hstate
*h
, int nid
)
2990 struct huge_bootmem_page
*m
= NULL
; /* initialize for clang */
2993 /* do node specific alloc */
2994 if (nid
!= NUMA_NO_NODE
) {
2995 m
= memblock_alloc_try_nid_raw(huge_page_size(h
), huge_page_size(h
),
2996 0, MEMBLOCK_ALLOC_ACCESSIBLE
, nid
);
3001 /* allocate from next node when distributing huge pages */
3002 for_each_node_mask_to_alloc(h
, nr_nodes
, node
, &node_states
[N_MEMORY
]) {
3003 m
= memblock_alloc_try_nid_raw(
3004 huge_page_size(h
), huge_page_size(h
),
3005 0, MEMBLOCK_ALLOC_ACCESSIBLE
, node
);
3007 * Use the beginning of the huge page to store the
3008 * huge_bootmem_page struct (until gather_bootmem
3009 * puts them into the mem_map).
3017 /* Put them into a private list first because mem_map is not up yet */
3018 INIT_LIST_HEAD(&m
->list
);
3019 list_add(&m
->list
, &huge_boot_pages
);
/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 * Note: This only applies to gigantic (order > MAX_ORDER) pages.
 */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;

		VM_BUG_ON(!hstate_is_gigantic(h));
		WARN_ON(page_count(page) != 1);
		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
			WARN_ON(PageReserved(page));
			prep_new_huge_page(h, page, page_to_nid(page));
			put_page(page); /* add to the hugepage allocator */
		} else {
			/* VERY unlikely inflated ref count on a tail page */
			free_gigantic_page(page, huge_page_order(h));
		}

		/*
		 * We need to restore the 'stolen' pages to totalram_pages
		 * in order to fix confusing memory reports from free(1) and
		 * other side-effects, like CommitLimit going negative.
		 */
		adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}
}
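/*
 * gather_bootmem_prealloc() runs once from hugetlb_init(), after the
 * memmap has been initialized, to convert the memblock allocations made
 * by __alloc_bootmem_huge_page() above into proper hstate pages.
 */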
3056 static void __init
hugetlb_hstate_alloc_pages_onenode(struct hstate
*h
, int nid
)
3061 for (i
= 0; i
< h
->max_huge_pages_node
[nid
]; ++i
) {
3062 if (hstate_is_gigantic(h
)) {
3063 if (!alloc_bootmem_huge_page(h
, nid
))
3067 gfp_t gfp_mask
= htlb_alloc_mask(h
) | __GFP_THISNODE
;
3069 page
= alloc_fresh_huge_page(h
, gfp_mask
, nid
,
3070 &node_states
[N_MEMORY
], NULL
);
3073 put_page(page
); /* free it into the hugepage allocator */
3077 if (i
== h
->max_huge_pages_node
[nid
])
3080 string_get_size(huge_page_size(h
), 1, STRING_UNITS_2
, buf
, 32);
3081 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3082 h
->max_huge_pages_node
[nid
], buf
, nid
, i
);
3083 h
->max_huge_pages
-= (h
->max_huge_pages_node
[nid
] - i
);
3084 h
->max_huge_pages_node
[nid
] = i
;
3087 static void __init
hugetlb_hstate_alloc_pages(struct hstate
*h
)
3090 nodemask_t
*node_alloc_noretry
;
3091 bool node_specific_alloc
= false;
3093 /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3094 if (hstate_is_gigantic(h
) && hugetlb_cma_size
) {
3095 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3099 /* do node specific alloc */
3100 for_each_online_node(i
) {
3101 if (h
->max_huge_pages_node
[i
] > 0) {
3102 hugetlb_hstate_alloc_pages_onenode(h
, i
);
3103 node_specific_alloc
= true;
3107 if (node_specific_alloc
)
3110 /* below will do all node balanced alloc */
3111 if (!hstate_is_gigantic(h
)) {
3113 * Bit mask controlling how hard we retry per-node allocations.
3114 * Ignore errors as lower level routines can deal with
3115 * node_alloc_noretry == NULL. If this kmalloc fails at boot
3116 * time, we are likely in bigger trouble.
3118 node_alloc_noretry
= kmalloc(sizeof(*node_alloc_noretry
),
3121 /* allocations done at boot time */
3122 node_alloc_noretry
= NULL
;
3125 /* bit mask controlling how hard we retry per-node allocations */
3126 if (node_alloc_noretry
)
3127 nodes_clear(*node_alloc_noretry
);
3129 for (i
= 0; i
< h
->max_huge_pages
; ++i
) {
3130 if (hstate_is_gigantic(h
)) {
3131 if (!alloc_bootmem_huge_page(h
, NUMA_NO_NODE
))
3133 } else if (!alloc_pool_huge_page(h
,
3134 &node_states
[N_MEMORY
],
3135 node_alloc_noretry
))
3139 if (i
< h
->max_huge_pages
) {
3142 string_get_size(huge_page_size(h
), 1, STRING_UNITS_2
, buf
, 32);
3143 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3144 h
->max_huge_pages
, buf
, i
);
3145 h
->max_huge_pages
= i
;
3147 kfree(node_alloc_noretry
);
3150 static void __init
hugetlb_init_hstates(void)
3152 struct hstate
*h
, *h2
;
3154 for_each_hstate(h
) {
3155 /* oversize hugepages were init'ed in early boot */
3156 if (!hstate_is_gigantic(h
))
3157 hugetlb_hstate_alloc_pages(h
);
3160 * Set demote order for each hstate. Note that
3161 * h->demote_order is initially 0.
3162 * - We can not demote gigantic pages if runtime freeing
3163 * is not supported, so skip this.
3164 * - If CMA allocation is possible, we can not demote
3165 * HUGETLB_PAGE_ORDER or smaller size pages.
3167 if (hstate_is_gigantic(h
) && !gigantic_page_runtime_supported())
3169 if (hugetlb_cma_size
&& h
->order
<= HUGETLB_PAGE_ORDER
)
3171 for_each_hstate(h2
) {
3174 if (h2
->order
< h
->order
&&
3175 h2
->order
> h
->demote_order
)
3176 h
->demote_order
= h2
->order
;
static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
	}
}
3196 #ifdef CONFIG_HIGHMEM
3197 static void try_to_free_low(struct hstate
*h
, unsigned long count
,
3198 nodemask_t
*nodes_allowed
)
3201 LIST_HEAD(page_list
);
3203 lockdep_assert_held(&hugetlb_lock
);
3204 if (hstate_is_gigantic(h
))
3208 * Collect pages to be freed on a list, and free after dropping lock
3210 for_each_node_mask(i
, *nodes_allowed
) {
3211 struct page
*page
, *next
;
3212 struct list_head
*freel
= &h
->hugepage_freelists
[i
];
3213 list_for_each_entry_safe(page
, next
, freel
, lru
) {
3214 if (count
>= h
->nr_huge_pages
)
3216 if (PageHighMem(page
))
3218 remove_hugetlb_page(h
, page
, false);
3219 list_add(&page
->lru
, &page_list
);
3224 spin_unlock_irq(&hugetlb_lock
);
3225 update_and_free_pages_bulk(h
, &page_list
);
3226 spin_lock_irq(&hugetlb_lock
);
3229 static inline void try_to_free_low(struct hstate
*h
, unsigned long count
,
3230 nodemask_t
*nodes_allowed
)
3236 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3237 * balanced by operating on them in a round-robin fashion.
3238 * Returns 1 if an adjustment was made.
3240 static int adjust_pool_surplus(struct hstate
*h
, nodemask_t
*nodes_allowed
,
3245 lockdep_assert_held(&hugetlb_lock
);
3246 VM_BUG_ON(delta
!= -1 && delta
!= 1);
3249 for_each_node_mask_to_alloc(h
, nr_nodes
, node
, nodes_allowed
) {
3250 if (h
->surplus_huge_pages_node
[node
])
3254 for_each_node_mask_to_free(h
, nr_nodes
, node
, nodes_allowed
) {
3255 if (h
->surplus_huge_pages_node
[node
] <
3256 h
->nr_huge_pages_node
[node
])
3263 h
->surplus_huge_pages
+= delta
;
3264 h
->surplus_huge_pages_node
[node
] += delta
;
3268 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
3269 static int set_max_huge_pages(struct hstate
*h
, unsigned long count
, int nid
,
3270 nodemask_t
*nodes_allowed
)
3272 unsigned long min_count
, ret
;
3274 LIST_HEAD(page_list
);
3275 NODEMASK_ALLOC(nodemask_t
, node_alloc_noretry
, GFP_KERNEL
);
3278 * Bit mask controlling how hard we retry per-node allocations.
3279 * If we can not allocate the bit mask, do not attempt to allocate
3280 * the requested huge pages.
3282 if (node_alloc_noretry
)
3283 nodes_clear(*node_alloc_noretry
);
3288 * resize_lock mutex prevents concurrent adjustments to number of
3289 * pages in hstate via the proc/sysfs interfaces.
3291 mutex_lock(&h
->resize_lock
);
3292 flush_free_hpage_work(h
);
3293 spin_lock_irq(&hugetlb_lock
);
3296 * Check for a node specific request.
3297 * Changing node specific huge page count may require a corresponding
3298 * change to the global count. In any case, the passed node mask
3299 * (nodes_allowed) will restrict alloc/free to the specified node.
3301 if (nid
!= NUMA_NO_NODE
) {
3302 unsigned long old_count
= count
;
3304 count
+= h
->nr_huge_pages
- h
->nr_huge_pages_node
[nid
];
3306 * User may have specified a large count value which caused the
3307 * above calculation to overflow. In this case, they wanted
3308 * to allocate as many huge pages as possible. Set count to
3309 * largest possible value to align with their intention.
3311 if (count
< old_count
)
3316 * Gigantic pages runtime allocation depend on the capability for large
3317 * page range allocation.
3318 * If the system does not provide this feature, return an error when
3319 * the user tries to allocate gigantic pages but let the user free the
3320 * boottime allocated gigantic pages.
3322 if (hstate_is_gigantic(h
) && !IS_ENABLED(CONFIG_CONTIG_ALLOC
)) {
3323 if (count
> persistent_huge_pages(h
)) {
3324 spin_unlock_irq(&hugetlb_lock
);
3325 mutex_unlock(&h
->resize_lock
);
3326 NODEMASK_FREE(node_alloc_noretry
);
3329 /* Fall through to decrease pool */
3333 * Increase the pool size
3334 * First take pages out of surplus state. Then make up the
3335 * remaining difference by allocating fresh huge pages.
3337 * We might race with alloc_surplus_huge_page() here and be unable
3338 * to convert a surplus huge page to a normal huge page. That is
3339 * not critical, though, it just means the overall size of the
3340 * pool might be one hugepage larger than it needs to be, but
3341 * within all the constraints specified by the sysctls.
3343 while (h
->surplus_huge_pages
&& count
> persistent_huge_pages(h
)) {
3344 if (!adjust_pool_surplus(h
, nodes_allowed
, -1))
3348 while (count
> persistent_huge_pages(h
)) {
3350 * If this allocation races such that we no longer need the
3351 * page, free_huge_page will handle it by freeing the page
3352 * and reducing the surplus.
3354 spin_unlock_irq(&hugetlb_lock
);
3356 /* yield cpu to avoid soft lockup */
3359 ret
= alloc_pool_huge_page(h
, nodes_allowed
,
3360 node_alloc_noretry
);
3361 spin_lock_irq(&hugetlb_lock
);
3365 /* Bail for signals. Probably ctrl-c from user */
3366 if (signal_pending(current
))
3371 * Decrease the pool size
3372 * First return free pages to the buddy allocator (being careful
3373 * to keep enough around to satisfy reservations). Then place
3374 * pages into surplus state as needed so the pool will shrink
3375 * to the desired size as pages become free.
3377 * By placing pages into the surplus state independent of the
3378 * overcommit value, we are allowing the surplus pool size to
3379 * exceed overcommit. There are few sane options here. Since
3380 * alloc_surplus_huge_page() is checking the global counter,
3381 * though, we'll note that we're not allowed to exceed surplus
3382 * and won't grow the pool anywhere else. Not until one of the
3383 * sysctls are changed, or the surplus pages go out of use.
3385 min_count
= h
->resv_huge_pages
+ h
->nr_huge_pages
- h
->free_huge_pages
;
3386 min_count
= max(count
, min_count
);
3387 try_to_free_low(h
, min_count
, nodes_allowed
);
3390 * Collect pages to be removed on list without dropping lock
3392 while (min_count
< persistent_huge_pages(h
)) {
3393 page
= remove_pool_huge_page(h
, nodes_allowed
, 0);
3397 list_add(&page
->lru
, &page_list
);
3399 /* free the pages after dropping lock */
3400 spin_unlock_irq(&hugetlb_lock
);
3401 update_and_free_pages_bulk(h
, &page_list
);
3402 flush_free_hpage_work(h
);
3403 spin_lock_irq(&hugetlb_lock
);
3405 while (count
< persistent_huge_pages(h
)) {
3406 if (!adjust_pool_surplus(h
, nodes_allowed
, 1))
3410 h
->max_huge_pages
= persistent_huge_pages(h
);
3411 spin_unlock_irq(&hugetlb_lock
);
3412 mutex_unlock(&h
->resize_lock
);
3414 NODEMASK_FREE(node_alloc_noretry
);
3419 static int demote_free_huge_page(struct hstate
*h
, struct page
*page
)
3421 int i
, nid
= page_to_nid(page
);
3422 struct hstate
*target_hstate
;
3425 target_hstate
= size_to_hstate(PAGE_SIZE
<< h
->demote_order
);
3427 remove_hugetlb_page_for_demote(h
, page
, false);
3428 spin_unlock_irq(&hugetlb_lock
);
3430 rc
= hugetlb_vmemmap_restore(h
, page
);
3432 /* Allocation of vmemmmap failed, we can not demote page */
3433 spin_lock_irq(&hugetlb_lock
);
3434 set_page_refcounted(page
);
3435 add_hugetlb_page(h
, page
, false);
3440 * Use destroy_compound_hugetlb_page_for_demote for all huge page
3441 * sizes as it will not ref count pages.
3443 destroy_compound_hugetlb_page_for_demote(page
, huge_page_order(h
));
3446 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3447 * Without the mutex, pages added to target hstate could be marked
3450 * Note that we already hold h->resize_lock. To prevent deadlock,
3451 * use the convention of always taking larger size hstate mutex first.
3453 mutex_lock(&target_hstate
->resize_lock
);
3454 for (i
= 0; i
< pages_per_huge_page(h
);
3455 i
+= pages_per_huge_page(target_hstate
)) {
3456 if (hstate_is_gigantic(target_hstate
))
3457 prep_compound_gigantic_page_for_demote(page
+ i
,
3458 target_hstate
->order
);
3460 prep_compound_page(page
+ i
, target_hstate
->order
);
3461 set_page_private(page
+ i
, 0);
3462 set_page_refcounted(page
+ i
);
3463 prep_new_huge_page(target_hstate
, page
+ i
, nid
);
3466 mutex_unlock(&target_hstate
->resize_lock
);
3468 spin_lock_irq(&hugetlb_lock
);
3471 * Not absolutely necessary, but for consistency update max_huge_pages
3472 * based on pool changes for the demoted page.
3474 h
->max_huge_pages
--;
3475 target_hstate
->max_huge_pages
+= pages_per_huge_page(h
);
3480 static int demote_pool_huge_page(struct hstate
*h
, nodemask_t
*nodes_allowed
)
3481 __must_hold(&hugetlb_lock
)
3486 lockdep_assert_held(&hugetlb_lock
);
3488 /* We should never get here if no demote order */
3489 if (!h
->demote_order
) {
3490 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3491 return -EINVAL
; /* internal error */
3494 for_each_node_mask_to_free(h
, nr_nodes
, node
, nodes_allowed
) {
3495 list_for_each_entry(page
, &h
->hugepage_freelists
[node
], lru
) {
3496 if (PageHWPoison(page
))
3499 return demote_free_huge_page(h
, page
);
3504 * Only way to get here is if all pages on free lists are poisoned.
3505 * Return -EBUSY so that caller will not retry.
3510 #define HSTATE_ATTR_RO(_name) \
3511 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3513 #define HSTATE_ATTR_WO(_name) \
3514 static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3516 #define HSTATE_ATTR(_name) \
3517 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3519 static struct kobject
*hugepages_kobj
;
3520 static struct kobject
*hstate_kobjs
[HUGE_MAX_HSTATE
];
3522 static struct hstate
*kobj_to_node_hstate(struct kobject
*kobj
, int *nidp
);
3524 static struct hstate
*kobj_to_hstate(struct kobject
*kobj
, int *nidp
)
3528 for (i
= 0; i
< HUGE_MAX_HSTATE
; i
++)
3529 if (hstate_kobjs
[i
] == kobj
) {
3531 *nidp
= NUMA_NO_NODE
;
3535 return kobj_to_node_hstate(kobj
, nidp
);
3538 static ssize_t
nr_hugepages_show_common(struct kobject
*kobj
,
3539 struct kobj_attribute
*attr
, char *buf
)
3542 unsigned long nr_huge_pages
;
3545 h
= kobj_to_hstate(kobj
, &nid
);
3546 if (nid
== NUMA_NO_NODE
)
3547 nr_huge_pages
= h
->nr_huge_pages
;
3549 nr_huge_pages
= h
->nr_huge_pages_node
[nid
];
3551 return sysfs_emit(buf
, "%lu\n", nr_huge_pages
);
3554 static ssize_t
__nr_hugepages_store_common(bool obey_mempolicy
,
3555 struct hstate
*h
, int nid
,
3556 unsigned long count
, size_t len
)
3559 nodemask_t nodes_allowed
, *n_mask
;
3561 if (hstate_is_gigantic(h
) && !gigantic_page_runtime_supported())
3564 if (nid
== NUMA_NO_NODE
) {
3566 * global hstate attribute
3568 if (!(obey_mempolicy
&&
3569 init_nodemask_of_mempolicy(&nodes_allowed
)))
3570 n_mask
= &node_states
[N_MEMORY
];
3572 n_mask
= &nodes_allowed
;
3575 * Node specific request. count adjustment happens in
3576 * set_max_huge_pages() after acquiring hugetlb_lock.
3578 init_nodemask_of_node(&nodes_allowed
, nid
);
3579 n_mask
= &nodes_allowed
;
3582 err
= set_max_huge_pages(h
, count
, nid
, n_mask
);
3584 return err
? err
: len
;
3587 static ssize_t
nr_hugepages_store_common(bool obey_mempolicy
,
3588 struct kobject
*kobj
, const char *buf
,
3592 unsigned long count
;
3596 err
= kstrtoul(buf
, 10, &count
);
3600 h
= kobj_to_hstate(kobj
, &nid
);
3601 return __nr_hugepages_store_common(obey_mempolicy
, h
, nid
, count
, len
);
3604 static ssize_t
nr_hugepages_show(struct kobject
*kobj
,
3605 struct kobj_attribute
*attr
, char *buf
)
3607 return nr_hugepages_show_common(kobj
, attr
, buf
);
3610 static ssize_t
nr_hugepages_store(struct kobject
*kobj
,
3611 struct kobj_attribute
*attr
, const char *buf
, size_t len
)
3613 return nr_hugepages_store_common(false, kobj
, buf
, len
);
3615 HSTATE_ATTR(nr_hugepages
);
3620 * hstate attribute for optionally mempolicy-based constraint on persistent
3621 * huge page alloc/free.
3623 static ssize_t
nr_hugepages_mempolicy_show(struct kobject
*kobj
,
3624 struct kobj_attribute
*attr
,
3627 return nr_hugepages_show_common(kobj
, attr
, buf
);
3630 static ssize_t
nr_hugepages_mempolicy_store(struct kobject
*kobj
,
3631 struct kobj_attribute
*attr
, const char *buf
, size_t len
)
3633 return nr_hugepages_store_common(true, kobj
, buf
, len
);
3635 HSTATE_ATTR(nr_hugepages_mempolicy
);
3639 static ssize_t
nr_overcommit_hugepages_show(struct kobject
*kobj
,
3640 struct kobj_attribute
*attr
, char *buf
)
3642 struct hstate
*h
= kobj_to_hstate(kobj
, NULL
);
3643 return sysfs_emit(buf
, "%lu\n", h
->nr_overcommit_huge_pages
);
3646 static ssize_t
nr_overcommit_hugepages_store(struct kobject
*kobj
,
3647 struct kobj_attribute
*attr
, const char *buf
, size_t count
)
3650 unsigned long input
;
3651 struct hstate
*h
= kobj_to_hstate(kobj
, NULL
);
3653 if (hstate_is_gigantic(h
))
3656 err
= kstrtoul(buf
, 10, &input
);
3660 spin_lock_irq(&hugetlb_lock
);
3661 h
->nr_overcommit_huge_pages
= input
;
3662 spin_unlock_irq(&hugetlb_lock
);
3666 HSTATE_ATTR(nr_overcommit_hugepages
);
3668 static ssize_t
free_hugepages_show(struct kobject
*kobj
,
3669 struct kobj_attribute
*attr
, char *buf
)
3672 unsigned long free_huge_pages
;
3675 h
= kobj_to_hstate(kobj
, &nid
);
3676 if (nid
== NUMA_NO_NODE
)
3677 free_huge_pages
= h
->free_huge_pages
;
3679 free_huge_pages
= h
->free_huge_pages_node
[nid
];
3681 return sysfs_emit(buf
, "%lu\n", free_huge_pages
);
3683 HSTATE_ATTR_RO(free_hugepages
);
3685 static ssize_t
resv_hugepages_show(struct kobject
*kobj
,
3686 struct kobj_attribute
*attr
, char *buf
)
3688 struct hstate
*h
= kobj_to_hstate(kobj
, NULL
);
3689 return sysfs_emit(buf
, "%lu\n", h
->resv_huge_pages
);
3691 HSTATE_ATTR_RO(resv_hugepages
);
3693 static ssize_t
surplus_hugepages_show(struct kobject
*kobj
,
3694 struct kobj_attribute
*attr
, char *buf
)
3697 unsigned long surplus_huge_pages
;
3700 h
= kobj_to_hstate(kobj
, &nid
);
3701 if (nid
== NUMA_NO_NODE
)
3702 surplus_huge_pages
= h
->surplus_huge_pages
;
3704 surplus_huge_pages
= h
->surplus_huge_pages_node
[nid
];
3706 return sysfs_emit(buf
, "%lu\n", surplus_huge_pages
);
3708 HSTATE_ATTR_RO(surplus_hugepages
);
3710 static ssize_t
demote_store(struct kobject
*kobj
,
3711 struct kobj_attribute
*attr
, const char *buf
, size_t len
)
3713 unsigned long nr_demote
;
3714 unsigned long nr_available
;
3715 nodemask_t nodes_allowed
, *n_mask
;
3720 err
= kstrtoul(buf
, 10, &nr_demote
);
3723 h
= kobj_to_hstate(kobj
, &nid
);
3725 if (nid
!= NUMA_NO_NODE
) {
3726 init_nodemask_of_node(&nodes_allowed
, nid
);
3727 n_mask
= &nodes_allowed
;
3729 n_mask
= &node_states
[N_MEMORY
];
3732 /* Synchronize with other sysfs operations modifying huge pages */
3733 mutex_lock(&h
->resize_lock
);
3734 spin_lock_irq(&hugetlb_lock
);
3738 * Check for available pages to demote each time thorough the
3739 * loop as demote_pool_huge_page will drop hugetlb_lock.
3741 if (nid
!= NUMA_NO_NODE
)
3742 nr_available
= h
->free_huge_pages_node
[nid
];
3744 nr_available
= h
->free_huge_pages
;
3745 nr_available
-= h
->resv_huge_pages
;
3749 err
= demote_pool_huge_page(h
, n_mask
);
3756 spin_unlock_irq(&hugetlb_lock
);
3757 mutex_unlock(&h
->resize_lock
);
3763 HSTATE_ATTR_WO(demote
);
3765 static ssize_t
demote_size_show(struct kobject
*kobj
,
3766 struct kobj_attribute
*attr
, char *buf
)
3769 struct hstate
*h
= kobj_to_hstate(kobj
, &nid
);
3770 unsigned long demote_size
= (PAGE_SIZE
<< h
->demote_order
) / SZ_1K
;
3772 return sysfs_emit(buf
, "%lukB\n", demote_size
);
3775 static ssize_t
demote_size_store(struct kobject
*kobj
,
3776 struct kobj_attribute
*attr
,
3777 const char *buf
, size_t count
)
3779 struct hstate
*h
, *demote_hstate
;
3780 unsigned long demote_size
;
3781 unsigned int demote_order
;
3784 demote_size
= (unsigned long)memparse(buf
, NULL
);
3786 demote_hstate
= size_to_hstate(demote_size
);
3789 demote_order
= demote_hstate
->order
;
3790 if (demote_order
< HUGETLB_PAGE_ORDER
)
3793 /* demote order must be smaller than hstate order */
3794 h
= kobj_to_hstate(kobj
, &nid
);
3795 if (demote_order
>= h
->order
)
3798 /* resize_lock synchronizes access to demote size and writes */
3799 mutex_lock(&h
->resize_lock
);
3800 h
->demote_order
= demote_order
;
3801 mutex_unlock(&h
->resize_lock
);
3805 HSTATE_ATTR(demote_size
);
3807 static struct attribute
*hstate_attrs
[] = {
3808 &nr_hugepages_attr
.attr
,
3809 &nr_overcommit_hugepages_attr
.attr
,
3810 &free_hugepages_attr
.attr
,
3811 &resv_hugepages_attr
.attr
,
3812 &surplus_hugepages_attr
.attr
,
3814 &nr_hugepages_mempolicy_attr
.attr
,
3819 static const struct attribute_group hstate_attr_group
= {
3820 .attrs
= hstate_attrs
,
3823 static struct attribute
*hstate_demote_attrs
[] = {
3824 &demote_size_attr
.attr
,
3829 static const struct attribute_group hstate_demote_attr_group
= {
3830 .attrs
= hstate_demote_attrs
,
3833 static int hugetlb_sysfs_add_hstate(struct hstate
*h
, struct kobject
*parent
,
3834 struct kobject
**hstate_kobjs
,
3835 const struct attribute_group
*hstate_attr_group
)
3838 int hi
= hstate_index(h
);
3840 hstate_kobjs
[hi
] = kobject_create_and_add(h
->name
, parent
);
3841 if (!hstate_kobjs
[hi
])
3844 retval
= sysfs_create_group(hstate_kobjs
[hi
], hstate_attr_group
);
3846 kobject_put(hstate_kobjs
[hi
]);
3847 hstate_kobjs
[hi
] = NULL
;
3850 if (h
->demote_order
) {
3851 if (sysfs_create_group(hstate_kobjs
[hi
],
3852 &hstate_demote_attr_group
))
3853 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h
->name
);
3859 static void __init
hugetlb_sysfs_init(void)
3864 hugepages_kobj
= kobject_create_and_add("hugepages", mm_kobj
);
3865 if (!hugepages_kobj
)
3868 for_each_hstate(h
) {
3869 err
= hugetlb_sysfs_add_hstate(h
, hugepages_kobj
,
3870 hstate_kobjs
, &hstate_attr_group
);
3872 pr_err("HugeTLB: Unable to add hstate %s", h
->name
);
3879 * node_hstate/s - associate per node hstate attributes, via their kobjects,
3880 * with node devices in node_devices[] using a parallel array. The array
3881 * index of a node device or _hstate == node id.
3882 * This is here to avoid any static dependency of the node device driver, in
3883 * the base kernel, on the hugetlb module.
3885 struct node_hstate
{
3886 struct kobject
*hugepages_kobj
;
3887 struct kobject
*hstate_kobjs
[HUGE_MAX_HSTATE
];
3889 static struct node_hstate node_hstates
[MAX_NUMNODES
];
3892 * A subset of global hstate attributes for node devices
3894 static struct attribute
*per_node_hstate_attrs
[] = {
3895 &nr_hugepages_attr
.attr
,
3896 &free_hugepages_attr
.attr
,
3897 &surplus_hugepages_attr
.attr
,
3901 static const struct attribute_group per_node_hstate_attr_group
= {
3902 .attrs
= per_node_hstate_attrs
,
3906 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3907 * Returns node id via non-NULL nidp.
3909 static struct hstate
*kobj_to_node_hstate(struct kobject
*kobj
, int *nidp
)
3913 for (nid
= 0; nid
< nr_node_ids
; nid
++) {
3914 struct node_hstate
*nhs
= &node_hstates
[nid
];
3916 for (i
= 0; i
< HUGE_MAX_HSTATE
; i
++)
3917 if (nhs
->hstate_kobjs
[i
] == kobj
) {
3929 * Unregister hstate attributes from a single node device.
3930 * No-op if no hstate attributes attached.
3932 static void hugetlb_unregister_node(struct node
*node
)
3935 struct node_hstate
*nhs
= &node_hstates
[node
->dev
.id
];
3937 if (!nhs
->hugepages_kobj
)
3938 return; /* no hstate attributes */
3940 for_each_hstate(h
) {
3941 int idx
= hstate_index(h
);
3942 if (nhs
->hstate_kobjs
[idx
]) {
3943 kobject_put(nhs
->hstate_kobjs
[idx
]);
3944 nhs
->hstate_kobjs
[idx
] = NULL
;
3948 kobject_put(nhs
->hugepages_kobj
);
3949 nhs
->hugepages_kobj
= NULL
;
3954 * Register hstate attributes for a single node device.
3955 * No-op if attributes already registered.
3957 static void hugetlb_register_node(struct node
*node
)
3960 struct node_hstate
*nhs
= &node_hstates
[node
->dev
.id
];
3963 if (nhs
->hugepages_kobj
)
3964 return; /* already allocated */
3966 nhs
->hugepages_kobj
= kobject_create_and_add("hugepages",
3968 if (!nhs
->hugepages_kobj
)
3971 for_each_hstate(h
) {
3972 err
= hugetlb_sysfs_add_hstate(h
, nhs
->hugepages_kobj
,
3974 &per_node_hstate_attr_group
);
3976 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3977 h
->name
, node
->dev
.id
);
3978 hugetlb_unregister_node(node
);
3985 * hugetlb init time: register hstate attributes for all registered node
3986 * devices of nodes that have memory. All on-line nodes should have
3987 * registered their associated device by this time.
3989 static void __init
hugetlb_register_all_nodes(void)
3993 for_each_node_state(nid
, N_MEMORY
) {
3994 struct node
*node
= node_devices
[nid
];
3995 if (node
->dev
.id
== nid
)
3996 hugetlb_register_node(node
);
4000 * Let the node device driver know we're here so it can
4001 * [un]register hstate attributes on node hotplug.
4003 register_hugetlbfs_with_node(hugetlb_register_node
,
4004 hugetlb_unregister_node
);
4006 #else /* !CONFIG_NUMA */
4008 static struct hstate
*kobj_to_node_hstate(struct kobject
*kobj
, int *nidp
)
4016 static void hugetlb_register_all_nodes(void) { }
4020 static int __init
hugetlb_init(void)
4024 BUILD_BUG_ON(sizeof_field(struct page
, private) * BITS_PER_BYTE
<
4027 if (!hugepages_supported()) {
4028 if (hugetlb_max_hstate
|| default_hstate_max_huge_pages
)
4029 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4034 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4035 * architectures depend on setup being done here.
4037 hugetlb_add_hstate(HUGETLB_PAGE_ORDER
);
4038 if (!parsed_default_hugepagesz
) {
4040 * If we did not parse a default huge page size, set
4041 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4042 * number of huge pages for this default size was implicitly
4043 * specified, set that here as well.
4044 * Note that the implicit setting will overwrite an explicit
4045 * setting. A warning will be printed in this case.
4047 default_hstate_idx
= hstate_index(size_to_hstate(HPAGE_SIZE
));
4048 if (default_hstate_max_huge_pages
) {
4049 if (default_hstate
.max_huge_pages
) {
4052 string_get_size(huge_page_size(&default_hstate
),
4053 1, STRING_UNITS_2
, buf
, 32);
4054 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4055 default_hstate
.max_huge_pages
, buf
);
4056 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4057 default_hstate_max_huge_pages
);
4059 default_hstate
.max_huge_pages
=
4060 default_hstate_max_huge_pages
;
4062 for_each_online_node(i
)
4063 default_hstate
.max_huge_pages_node
[i
] =
4064 default_hugepages_in_node
[i
];
4068 hugetlb_cma_check();
4069 hugetlb_init_hstates();
4070 gather_bootmem_prealloc();
4073 hugetlb_sysfs_init();
4074 hugetlb_register_all_nodes();
4075 hugetlb_cgroup_file_init();
4078 num_fault_mutexes
= roundup_pow_of_two(8 * num_possible_cpus());
4080 num_fault_mutexes
= 1;
4082 hugetlb_fault_mutex_table
=
4083 kmalloc_array(num_fault_mutexes
, sizeof(struct mutex
),
4085 BUG_ON(!hugetlb_fault_mutex_table
);
4087 for (i
= 0; i
< num_fault_mutexes
; i
++)
4088 mutex_init(&hugetlb_fault_mutex_table
[i
]);
4091 subsys_initcall(hugetlb_init
);
/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	return size == HPAGE_SIZE;
}
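/*
 * Architectures with more than one huge page size override this weak
 * default; on x86-64, for example, 1 GiB pages are additionally accepted
 * when the CPU supports GB pages.
 */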
4099 void __init
hugetlb_add_hstate(unsigned int order
)
4104 if (size_to_hstate(PAGE_SIZE
<< order
)) {
4107 BUG_ON(hugetlb_max_hstate
>= HUGE_MAX_HSTATE
);
4109 h
= &hstates
[hugetlb_max_hstate
++];
4110 mutex_init(&h
->resize_lock
);
4112 h
->mask
= ~(huge_page_size(h
) - 1);
4113 for (i
= 0; i
< MAX_NUMNODES
; ++i
)
4114 INIT_LIST_HEAD(&h
->hugepage_freelists
[i
]);
4115 INIT_LIST_HEAD(&h
->hugepage_activelist
);
4116 h
->next_nid_to_alloc
= first_memory_node
;
4117 h
->next_nid_to_free
= first_memory_node
;
4118 snprintf(h
->name
, HSTATE_NAME_LEN
, "hugepages-%lukB",
4119 huge_page_size(h
)/1024);
4124 bool __init __weak
hugetlb_node_alloc_supported(void)
4129 static void __init
hugepages_clear_pages_in_node(void)
4131 if (!hugetlb_max_hstate
) {
4132 default_hstate_max_huge_pages
= 0;
4133 memset(default_hugepages_in_node
, 0,
4134 MAX_NUMNODES
* sizeof(unsigned int));
4136 parsed_hstate
->max_huge_pages
= 0;
4137 memset(parsed_hstate
->max_huge_pages_node
, 0,
4138 MAX_NUMNODES
* sizeof(unsigned int));
/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagesz or default_hugepagesz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line option in which case it implicitly
 * specifies the number of huge pages for the default size.
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;
	int node = NUMA_NO_NODE;
	int count;
	unsigned long tmp;
	char *p = s;

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 1;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 1;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
			goto invalid;
		/* Parameter is node format */
		if (p[count] == ':') {
			if (!hugetlb_node_alloc_supported()) {
				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
				return 1;
			}
			if (tmp >= MAX_NUMNODES || !node_online(tmp))
				goto invalid;
			node = array_index_nospec(tmp, MAX_NUMNODES);
			p += count + 1;
			/* Parse hugepages */
			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
				goto invalid;
			if (!hugetlb_max_hstate)
				default_hugepages_in_node[node] = tmp;
			else
				parsed_hstate->max_huge_pages_node[node] = tmp;
			*mhp += tmp;
			/* Go to parse next node */
			if (p[count] == ',')
				p += count + 1;
			else
				break;
		} else {
			if (p != s)
				goto invalid;
			*mhp = tmp;
			break;
		}
	}

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;

invalid:
	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
	hugepages_clear_pages_in_node();
	return 0;
}
__setup("hugepages=", hugepages_setup);
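/*
 * Illustrative boot parameter usage for the parser above (example values
 * only):
 *
 *   hugepagesz=2M hugepages=512       512 pages of the 2MB size
 *   hugepages=64                      64 pages of the default size
 *   hugepages=0:32,1:32               node format: 32 pages on node 0 and
 *                                     32 on node 1 for the selected size
 */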
/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{
	unsigned long size;
	struct hstate *h;

	parsed_valid_hugepagesz = false;
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return 0;
	}

	h = size_to_hstate(size);
	if (h) {
		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */
		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
		    default_hstate.max_huge_pages) {
			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
			return 0;
		}

		/*
		 * No need to call hugetlb_add_hstate() as hstate already
		 * exists.  But, do set parsed_hstate so that a following
		 * hugepages= parameter will be applied to this hstate.
		 */
		parsed_hstate = h;
		parsed_valid_hugepagesz = true;
		return 1;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);
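/*
 * Example (illustrative): with "hugepagesz=1G hugepages=2" the 1GB hstate
 * is selected here via parsed_hstate, and the following hugepages= option
 * is then applied to that hstate by hugepages_setup() above.
 */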
/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
static int __init default_hugepagesz_setup(char *s)
{
	unsigned long size;
	int i;

	parsed_valid_hugepagesz = false;
	if (parsed_default_hugepagesz) {
		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
		return 0;
	}

	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
		return 0;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	parsed_default_hugepagesz = true;
	default_hstate_idx = hstate_index(size_to_hstate(size));

	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic (>= MAX_ORDER), then the pages must be
	 * allocated here from bootmem allocator.
	 */
	if (default_hstate_max_huge_pages) {
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
		for_each_online_node(i)
			default_hstate.max_huge_pages_node[i] =
				default_hugepages_in_node[i];
		if (hstate_is_gigantic(&default_hstate))
			hugetlb_hstate_alloc_pages(&default_hstate);
		default_hstate_max_huge_pages = 0;
	}

	return 1;
}
__setup("default_hugepagesz=", default_hugepagesz_setup);
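/*
 * allowed_mems_nr() - number of free huge pages of @h on the nodes the
 * current task may use: the cpuset mems_allowed set, optionally intersected
 * with the task's memory policy nodemask.  Used by hugetlb_acct_memory()
 * below as a best-effort availability check.
 */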
static unsigned int allowed_mems_nr(struct hstate *h)
{
	int node;
	unsigned int nr = 0;
	nodemask_t *mpol_allowed;
	unsigned int *array = h->free_huge_pages_node;
	gfp_t gfp_mask = htlb_alloc_mask(h);

	mpol_allowed = policy_nodemask_current(gfp_mask);

	for_each_node_mask(node, cpuset_current_mems_allowed) {
		if (!mpol_allowed || node_isset(node, *mpol_allowed))
			nr += array[node];
	}

	return nr;
}
#ifdef CONFIG_SYSCTL
static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
					  void *buffer, size_t *length,
					  loff_t *ppos, unsigned long *out)
{
	struct ctl_table dup_table;

	/*
	 * In order to avoid races with __do_proc_doulongvec_minmax(), we
	 * can duplicate the @table and alter the duplicate of it.
	 */
	dup_table = *table;
	dup_table.data = out;

	return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
}
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			  struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp = h->max_huge_pages;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
out:
	return ret;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	if (!hugepages_supported())
		return -EOPNOTSUPP;

	tmp = h->nr_overcommit_huge_pages;

	if (write && hstate_is_gigantic(h))
		return -EINVAL;

	ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
					     &tmp);
	if (ret)
		goto out;

	if (write) {
		spin_lock_irq(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock_irq(&hugetlb_lock);
	}
out:
	return ret;
}

#endif /* CONFIG_SYSCTL */
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h;
	unsigned long total = 0;

	if (!hugepages_supported())
		return;

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

		total += huge_page_size(h) * count;

		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total:   %5lu\n"
				   "HugePages_Free:    %5lu\n"
				   "HugePages_Rsvd:    %5lu\n"
				   "HugePages_Surp:    %5lu\n"
				   "Hugepagesize:   %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
				   huge_page_size(h) / SZ_1K);
	}

	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
}
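/*
 * Example /proc/meminfo output produced by the format strings above
 * (values are illustrative only):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      512
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:         1048576 kB
 */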
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	struct hstate *h = &default_hstate;

	if (!hugepages_supported())
		return 0;

	return sysfs_emit_at(buf, len,
			     "Node %d HugePages_Total: %5u\n"
			     "Node %d HugePages_Free:  %5u\n"
			     "Node %d HugePages_Surp:  %5u\n",
			     nid, h->nr_huge_pages_node[nid],
			     nid, h->free_huge_pages_node[nid],
			     nid, h->surplus_huge_pages_node[nid]);
}
void hugetlb_show_meminfo_node(int nid)
{
	struct hstate *h;

	if (!hugepages_supported())
		return;

	for_each_hstate(h)
		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
			nid,
			h->nr_huge_pages_node[nid],
			h->free_huge_pages_node[nid],
			h->surplus_huge_pages_node[nid],
			huge_page_size(h) / SZ_1K);
}
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
}
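/*
 * The line above is reported via /proc/<pid>/status, e.g. (illustrative
 * value):
 *
 *   HugetlbPages:       2048 kB
 */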
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	if (!delta)
		return 0;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. An application can still potentially be OOM'ed by
	 * the kernel for lack of free hugetlb pages in the cpuset the task is
	 * in. Enforcing strict accounting with cpuset is almost impossible
	 * (or too ugly) because cpusets are too fluid: tasks or memory nodes
	 * can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to checking against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 *
	 * Apart from cpuset, we also have the memory policy mechanism that
	 * determines from which node the kernel will allocate memory in a
	 * NUMA system. So, similar to cpuset, we also should consider the
	 * memory policy of the current task, as described above.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * This new VMA should share its sibling's reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);
	hugetlb_cgroup_uncharge_counter(resv, start, end);
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}

	kref_put(&resv->refs, resv_map_release);
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
{
	if (addr & ~(huge_page_mask(hstate_vma(vma))))
		return -EINVAL;
	return 0;
}

static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	return huge_page_size(hstate_vma(vma));
}
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
 * here.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.may_split = hugetlb_vm_op_split,
	.pagesize = hugetlb_vm_op_pagesize,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);

	return entry;
}
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
bool is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_migration_entry(swp))
		return true;
	else
		return false;
}

static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return false;
	swp = pte_to_swp_entry(pte);
	if (is_hwpoison_entry(swp))
		return true;
	else
		return false;
}
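/*
 * Install a newly allocated, up-to-date anonymous huge page into the child
 * page table at @addr.  Used on the fork path in copy_hugetlb_page_range()
 * when a pinned anonymous page has to be copied rather than shared.
 */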
static void
hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		     struct page *new_page)
{
	__SetPageUptodate(new_page);
	hugepage_add_new_anon_rmap(new_page, vma, addr);
	set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
	ClearHPageRestoreReserve(new_page);
	SetHPageMigratable(new_page);
}
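/*
 * Copy the hugetlb page table entries from @src_vma to @dst_vma at fork
 * time.  Shared file mappings just duplicate the file rmap; private
 * anonymous pages are write-protected in both parent and child, except when
 * the page is pinned, in which case a new page is allocated and copied for
 * the child (see hugetlb_install_page() above).
 */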
4720 int copy_hugetlb_page_range(struct mm_struct
*dst
, struct mm_struct
*src
,
4721 struct vm_area_struct
*dst_vma
,
4722 struct vm_area_struct
*src_vma
)
4724 pte_t
*src_pte
, *dst_pte
, entry
, dst_entry
;
4725 struct page
*ptepage
;
4727 bool cow
= is_cow_mapping(src_vma
->vm_flags
);
4728 struct hstate
*h
= hstate_vma(src_vma
);
4729 unsigned long sz
= huge_page_size(h
);
4730 unsigned long npages
= pages_per_huge_page(h
);
4731 struct address_space
*mapping
= src_vma
->vm_file
->f_mapping
;
4732 struct mmu_notifier_range range
;
4733 unsigned long last_addr_mask
;
4737 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, src_vma
, src
,
4740 mmu_notifier_invalidate_range_start(&range
);
4741 mmap_assert_write_locked(src
);
4742 raw_write_seqcount_begin(&src
->write_protect_seq
);
4745 * For shared mappings i_mmap_rwsem must be held to call
4746 * huge_pte_alloc, otherwise the returned ptep could go
4747 * away if part of a shared pmd and another thread calls
4750 i_mmap_lock_read(mapping
);
4753 last_addr_mask
= hugetlb_mask_last_page(h
);
4754 for (addr
= src_vma
->vm_start
; addr
< src_vma
->vm_end
; addr
+= sz
) {
4755 spinlock_t
*src_ptl
, *dst_ptl
;
4756 src_pte
= huge_pte_offset(src
, addr
, sz
);
4758 addr
|= last_addr_mask
;
4761 dst_pte
= huge_pte_alloc(dst
, dst_vma
, addr
, sz
);
4768 * If the pagetables are shared don't copy or take references.
4769 * dst_pte == src_pte is the common case of src/dest sharing.
4771 * However, src could have 'unshared' and dst shares with
4772 * another vma. If dst_pte !none, this implies sharing.
4773 * Check here before taking page table lock, and once again
4774 * after taking the lock below.
4776 dst_entry
= huge_ptep_get(dst_pte
);
4777 if ((dst_pte
== src_pte
) || !huge_pte_none(dst_entry
)) {
4778 addr
|= last_addr_mask
;
4782 dst_ptl
= huge_pte_lock(h
, dst
, dst_pte
);
4783 src_ptl
= huge_pte_lockptr(h
, src
, src_pte
);
4784 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
4785 entry
= huge_ptep_get(src_pte
);
4786 dst_entry
= huge_ptep_get(dst_pte
);
4788 if (huge_pte_none(entry
) || !huge_pte_none(dst_entry
)) {
4790 * Skip if src entry none. Also, skip in the
4791 * unlikely case dst entry !none as this implies
4792 * sharing with another vma.
4795 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry
))) {
4796 bool uffd_wp
= huge_pte_uffd_wp(entry
);
4798 if (!userfaultfd_wp(dst_vma
) && uffd_wp
)
4799 entry
= huge_pte_clear_uffd_wp(entry
);
4800 set_huge_pte_at(dst
, addr
, dst_pte
, entry
);
4801 } else if (unlikely(is_hugetlb_entry_migration(entry
))) {
4802 swp_entry_t swp_entry
= pte_to_swp_entry(entry
);
4803 bool uffd_wp
= huge_pte_uffd_wp(entry
);
4805 if (!is_readable_migration_entry(swp_entry
) && cow
) {
4807 * COW mappings require pages in both
4808 * parent and child to be set to read.
4810 swp_entry
= make_readable_migration_entry(
4811 swp_offset(swp_entry
));
4812 entry
= swp_entry_to_pte(swp_entry
);
4813 if (userfaultfd_wp(src_vma
) && uffd_wp
)
4814 entry
= huge_pte_mkuffd_wp(entry
);
4815 set_huge_pte_at(src
, addr
, src_pte
, entry
);
4817 if (!userfaultfd_wp(dst_vma
) && uffd_wp
)
4818 entry
= huge_pte_clear_uffd_wp(entry
);
4819 set_huge_pte_at(dst
, addr
, dst_pte
, entry
);
4820 } else if (unlikely(is_pte_marker(entry
))) {
4822 * We copy the pte marker only if the dst vma has
4825 if (userfaultfd_wp(dst_vma
))
4826 set_huge_pte_at(dst
, addr
, dst_pte
, entry
);
4828 entry
= huge_ptep_get(src_pte
);
4829 ptepage
= pte_page(entry
);
4833 * Failing to duplicate the anon rmap is a rare case
4834 * where we see pinned hugetlb pages while they're
4835 * prone to COW. We need to do the COW earlier during
4838 * When pre-allocating the page or copying data, we
4839 * need to be without the pgtable locks since we could
4840 * sleep during the process.
4842 if (!PageAnon(ptepage
)) {
4843 page_dup_file_rmap(ptepage
, true);
4844 } else if (page_try_dup_anon_rmap(ptepage
, true,
4846 pte_t src_pte_old
= entry
;
4849 spin_unlock(src_ptl
);
4850 spin_unlock(dst_ptl
);
4851 /* Do not use reserve as it's private owned */
4852 new = alloc_huge_page(dst_vma
, addr
, 1);
4858 copy_user_huge_page(new, ptepage
, addr
, dst_vma
,
4862 /* Install the new huge page if src pte stable */
4863 dst_ptl
= huge_pte_lock(h
, dst
, dst_pte
);
4864 src_ptl
= huge_pte_lockptr(h
, src
, src_pte
);
4865 spin_lock_nested(src_ptl
, SINGLE_DEPTH_NESTING
);
4866 entry
= huge_ptep_get(src_pte
);
4867 if (!pte_same(src_pte_old
, entry
)) {
4868 restore_reserve_on_error(h
, dst_vma
, addr
,
4871 /* dst_entry won't change as in child */
4874 hugetlb_install_page(dst_vma
, dst_pte
, addr
, new);
4875 spin_unlock(src_ptl
);
4876 spin_unlock(dst_ptl
);
4882 * No need to notify as we are downgrading page
4883 * table protection not changing it to point
4886 * See Documentation/mm/mmu_notifier.rst
4888 huge_ptep_set_wrprotect(src
, addr
, src_pte
);
4889 entry
= huge_pte_wrprotect(entry
);
4892 set_huge_pte_at(dst
, addr
, dst_pte
, entry
);
4893 hugetlb_count_add(npages
, dst
);
4895 spin_unlock(src_ptl
);
4896 spin_unlock(dst_ptl
);
4900 raw_write_seqcount_end(&src
->write_protect_seq
);
4901 mmu_notifier_invalidate_range_end(&range
);
4903 i_mmap_unlock_read(mapping
);
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t pte;

	dst_ptl = huge_pte_lock(h, mm, dst_pte);
	src_ptl = huge_pte_lockptr(h, mm, src_pte);

	/*
	 * We don't have to worry about the ordering of src and dst ptlocks
	 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
	 */
	if (src_ptl != dst_ptl)
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
	set_huge_pte_at(mm, new_addr, dst_pte, pte);

	if (src_ptl != dst_ptl)
		spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
}
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_end = old_addr + len;
	unsigned long last_addr_mask;
	pte_t *src_pte, *dst_pte;
	struct mmu_notifier_range range;
	bool shared_pmd = false;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
				old_end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	/*
	 * In case of shared PMDs, we should cover the maximum possible
	 * range.
	 */
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	last_addr_mask = hugetlb_mask_last_page(h);
	/* Prevent race with file truncation */
	i_mmap_lock_write(mapping);
	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
		src_pte = huge_pte_offset(mm, old_addr, sz);
		if (!src_pte) {
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}
		if (huge_pte_none(huge_ptep_get(src_pte)))
			continue;

		if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
			shared_pmd = true;
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}

		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
		if (!dst_pte)
			break;

		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
	}

	if (shared_pmd)
		flush_tlb_range(vma, range.start, range.end);
	else
		flush_tlb_range(vma, old_end - len, old_end);
	mmu_notifier_invalidate_range_end(&range);
	i_mmap_unlock_write(mapping);

	return len + old_addr - old_end;
}
4997 static void __unmap_hugepage_range(struct mmu_gather
*tlb
, struct vm_area_struct
*vma
,
4998 unsigned long start
, unsigned long end
,
4999 struct page
*ref_page
, zap_flags_t zap_flags
)
5001 struct mm_struct
*mm
= vma
->vm_mm
;
5002 unsigned long address
;
5007 struct hstate
*h
= hstate_vma(vma
);
5008 unsigned long sz
= huge_page_size(h
);
5009 struct mmu_notifier_range range
;
5010 unsigned long last_addr_mask
;
5011 bool force_flush
= false;
5013 WARN_ON(!is_vm_hugetlb_page(vma
));
5014 BUG_ON(start
& ~huge_page_mask(h
));
5015 BUG_ON(end
& ~huge_page_mask(h
));
5018 * This is a hugetlb vma, all the pte entries should point
5021 tlb_change_page_size(tlb
, sz
);
5022 tlb_start_vma(tlb
, vma
);
5025 * If sharing possible, alert mmu notifiers of worst case.
5027 mmu_notifier_range_init(&range
, MMU_NOTIFY_UNMAP
, 0, vma
, mm
, start
,
5029 adjust_range_if_pmd_sharing_possible(vma
, &range
.start
, &range
.end
);
5030 mmu_notifier_invalidate_range_start(&range
);
5031 last_addr_mask
= hugetlb_mask_last_page(h
);
5033 for (; address
< end
; address
+= sz
) {
5034 ptep
= huge_pte_offset(mm
, address
, sz
);
5036 address
|= last_addr_mask
;
5040 ptl
= huge_pte_lock(h
, mm
, ptep
);
5041 if (huge_pmd_unshare(mm
, vma
, address
, ptep
)) {
5043 tlb_flush_pmd_range(tlb
, address
& PUD_MASK
, PUD_SIZE
);
5045 address
|= last_addr_mask
;
5049 pte
= huge_ptep_get(ptep
);
5050 if (huge_pte_none(pte
)) {
5056 * Migrating hugepage or HWPoisoned hugepage is already
5057 * unmapped and its refcount is dropped, so just clear pte here.
5059 if (unlikely(!pte_present(pte
))) {
5061 * If the pte was wr-protected by uffd-wp in any of the
5062 * swap forms, meanwhile the caller does not want to
5063 * drop the uffd-wp bit in this zap, then replace the
5064 * pte with a marker.
5066 if (pte_swp_uffd_wp_any(pte
) &&
5067 !(zap_flags
& ZAP_FLAG_DROP_MARKER
))
5068 set_huge_pte_at(mm
, address
, ptep
,
5069 make_pte_marker(PTE_MARKER_UFFD_WP
));
5071 huge_pte_clear(mm
, address
, ptep
, sz
);
5076 page
= pte_page(pte
);
5078 * If a reference page is supplied, it is because a specific
5079 * page is being unmapped, not a range. Ensure the page we
5080 * are about to unmap is the actual page of interest.
5083 if (page
!= ref_page
) {
5088 * Mark the VMA as having unmapped its page so that
5089 * future faults in this VMA will fail rather than
5090 * looking like data was lost
5092 set_vma_resv_flags(vma
, HPAGE_RESV_UNMAPPED
);
5095 pte
= huge_ptep_get_and_clear(mm
, address
, ptep
);
5096 tlb_remove_huge_tlb_entry(h
, tlb
, ptep
, address
);
5097 if (huge_pte_dirty(pte
))
5098 set_page_dirty(page
);
5099 /* Leave a uffd-wp pte marker if needed */
5100 if (huge_pte_uffd_wp(pte
) &&
5101 !(zap_flags
& ZAP_FLAG_DROP_MARKER
))
5102 set_huge_pte_at(mm
, address
, ptep
,
5103 make_pte_marker(PTE_MARKER_UFFD_WP
));
5104 hugetlb_count_sub(pages_per_huge_page(h
), mm
);
5105 page_remove_rmap(page
, vma
, true);
5108 tlb_remove_page_size(tlb
, page
, huge_page_size(h
));
5110 * Bail out after unmapping reference page if supplied
5115 mmu_notifier_invalidate_range_end(&range
);
5116 tlb_end_vma(tlb
, vma
);
5119 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5120 * could defer the flush until now, since by holding i_mmap_rwsem we
5121 * guaranteed that the last refernece would not be dropped. But we must
5122 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5123 * dropped and the last reference to the shared PMDs page might be
5126 * In theory we could defer the freeing of the PMD pages as well, but
5127 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5128 * detect sharing, so we cannot defer the release of the page either.
5129 * Instead, do flush now.
5132 tlb_flush_mmu_tlbonly(tlb
);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.  We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem.  This works
	 * because in the context this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, vma->vm_mm);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
	tlb_finish_mmu(&tlb);
}
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = vma->vm_file->f_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page, 0);
	}
	i_mmap_unlock_write(mapping);
}
5223 * hugetlb_wp() should be called with page lock of the original hugepage held.
5224 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5225 * cannot race with other handlers or page migration.
5226 * Keep the pte_same checks anyway to make transition from the mutex easier.
5228 static vm_fault_t
hugetlb_wp(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
5229 unsigned long address
, pte_t
*ptep
, unsigned int flags
,
5230 struct page
*pagecache_page
, spinlock_t
*ptl
)
5232 const bool unshare
= flags
& FAULT_FLAG_UNSHARE
;
5234 struct hstate
*h
= hstate_vma(vma
);
5235 struct page
*old_page
, *new_page
;
5236 int outside_reserve
= 0;
5238 unsigned long haddr
= address
& huge_page_mask(h
);
5239 struct mmu_notifier_range range
;
5241 VM_BUG_ON(unshare
&& (flags
& FOLL_WRITE
));
5242 VM_BUG_ON(!unshare
&& !(flags
& FOLL_WRITE
));
5245 * hugetlb does not support FOLL_FORCE-style write faults that keep the
5246 * PTE mapped R/O such as maybe_mkwrite() would do.
5248 if (WARN_ON_ONCE(!unshare
&& !(vma
->vm_flags
& VM_WRITE
)))
5249 return VM_FAULT_SIGSEGV
;
5251 /* Let's take out MAP_SHARED mappings first. */
5252 if (vma
->vm_flags
& VM_MAYSHARE
) {
5253 if (unlikely(unshare
))
5255 set_huge_ptep_writable(vma
, haddr
, ptep
);
5259 pte
= huge_ptep_get(ptep
);
5260 old_page
= pte_page(pte
);
5262 delayacct_wpcopy_start();
5266 * If no-one else is actually using this page, we're the exclusive
5267 * owner and can reuse this page.
5269 if (page_mapcount(old_page
) == 1 && PageAnon(old_page
)) {
5270 if (!PageAnonExclusive(old_page
))
5271 page_move_anon_rmap(old_page
, vma
);
5272 if (likely(!unshare
))
5273 set_huge_ptep_writable(vma
, haddr
, ptep
);
5275 delayacct_wpcopy_end();
5278 VM_BUG_ON_PAGE(PageAnon(old_page
) && PageAnonExclusive(old_page
),
5282 * If the process that created a MAP_PRIVATE mapping is about to
5283 * perform a COW due to a shared page count, attempt to satisfy
5284 * the allocation without using the existing reserves. The pagecache
5285 * page is used to determine if the reserve at this address was
5286 * consumed or not. If reserves were used, a partial faulted mapping
5287 * at the time of fork() could consume its reserves on COW instead
5288 * of the full address range.
5290 if (is_vma_resv_set(vma
, HPAGE_RESV_OWNER
) &&
5291 old_page
!= pagecache_page
)
5292 outside_reserve
= 1;
5297 * Drop page table lock as buddy allocator may be called. It will
5298 * be acquired again before returning to the caller, as expected.
5301 new_page
= alloc_huge_page(vma
, haddr
, outside_reserve
);
5303 if (IS_ERR(new_page
)) {
5305 * If a process owning a MAP_PRIVATE mapping fails to COW,
5306 * it is due to references held by a child and an insufficient
5307 * huge page pool. To guarantee the original mappers
5308 * reliability, unmap the page from child processes. The child
5309 * may get SIGKILLed if it later faults.
5311 if (outside_reserve
) {
5312 struct address_space
*mapping
= vma
->vm_file
->f_mapping
;
5317 BUG_ON(huge_pte_none(pte
));
5319 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
5320 * unmapping. unmapping needs to hold i_mmap_rwsem
5321 * in write mode. Dropping i_mmap_rwsem in read mode
5322 * here is OK as COW mappings do not interact with
5325 * Reacquire both after unmap operation.
5327 idx
= vma_hugecache_offset(h
, vma
, haddr
);
5328 hash
= hugetlb_fault_mutex_hash(mapping
, idx
);
5329 mutex_unlock(&hugetlb_fault_mutex_table
[hash
]);
5330 i_mmap_unlock_read(mapping
);
5332 unmap_ref_private(mm
, vma
, old_page
, haddr
);
5334 i_mmap_lock_read(mapping
);
5335 mutex_lock(&hugetlb_fault_mutex_table
[hash
]);
5337 ptep
= huge_pte_offset(mm
, haddr
, huge_page_size(h
));
5339 pte_same(huge_ptep_get(ptep
), pte
)))
5340 goto retry_avoidcopy
;
5342 * race occurs while re-acquiring page table
5343 * lock, and our job is done.
5345 delayacct_wpcopy_end();
5349 ret
= vmf_error(PTR_ERR(new_page
));
5350 goto out_release_old
;
5354 * When the original hugepage is shared one, it does not have
5355 * anon_vma prepared.
5357 if (unlikely(anon_vma_prepare(vma
))) {
5359 goto out_release_all
;
5362 copy_user_huge_page(new_page
, old_page
, address
, vma
,
5363 pages_per_huge_page(h
));
5364 __SetPageUptodate(new_page
);
5366 mmu_notifier_range_init(&range
, MMU_NOTIFY_CLEAR
, 0, vma
, mm
, haddr
,
5367 haddr
+ huge_page_size(h
));
5368 mmu_notifier_invalidate_range_start(&range
);
5371 * Retake the page table lock to check for racing updates
5372 * before the page tables are altered
5375 ptep
= huge_pte_offset(mm
, haddr
, huge_page_size(h
));
5376 if (likely(ptep
&& pte_same(huge_ptep_get(ptep
), pte
))) {
5377 ClearHPageRestoreReserve(new_page
);
5379 /* Break COW or unshare */
5380 huge_ptep_clear_flush(vma
, haddr
, ptep
);
5381 mmu_notifier_invalidate_range(mm
, range
.start
, range
.end
);
5382 page_remove_rmap(old_page
, vma
, true);
5383 hugepage_add_new_anon_rmap(new_page
, vma
, haddr
);
5384 set_huge_pte_at(mm
, haddr
, ptep
,
5385 make_huge_pte(vma
, new_page
, !unshare
));
5386 SetHPageMigratable(new_page
);
5387 /* Make the old page be freed below */
5388 new_page
= old_page
;
5391 mmu_notifier_invalidate_range_end(&range
);
5394 * No restore in case of successful pagetable update (Break COW or
5397 if (new_page
!= old_page
)
5398 restore_reserve_on_error(h
, vma
, haddr
, new_page
);
5403 spin_lock(ptl
); /* Caller expects lock to be held */
5405 delayacct_wpcopy_end();
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err;

	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);

	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}
	ClearHPageRestoreReserve(page);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}
5471 static inline vm_fault_t
hugetlb_handle_userfault(struct vm_area_struct
*vma
,
5472 struct address_space
*mapping
,
5475 unsigned long haddr
,
5477 unsigned long reason
)
5481 struct vm_fault vmf
= {
5484 .real_address
= addr
,
5488 * Hard to debug if it ends up being
5489 * used by a callee that assumes
5490 * something about the other
5491 * uninitialized fields... same as in
5497 * hugetlb_fault_mutex and i_mmap_rwsem must be
5498 * dropped before handling userfault. Reacquire
5499 * after handling fault to make calling code simpler.
5501 hash
= hugetlb_fault_mutex_hash(mapping
, idx
);
5502 mutex_unlock(&hugetlb_fault_mutex_table
[hash
]);
5503 i_mmap_unlock_read(mapping
);
5504 ret
= handle_userfault(&vmf
, reason
);
5505 i_mmap_lock_read(mapping
);
5506 mutex_lock(&hugetlb_fault_mutex_table
[hash
]);
5511 static vm_fault_t
hugetlb_no_page(struct mm_struct
*mm
,
5512 struct vm_area_struct
*vma
,
5513 struct address_space
*mapping
, pgoff_t idx
,
5514 unsigned long address
, pte_t
*ptep
,
5515 pte_t old_pte
, unsigned int flags
)
5517 struct hstate
*h
= hstate_vma(vma
);
5518 vm_fault_t ret
= VM_FAULT_SIGBUS
;
5524 unsigned long haddr
= address
& huge_page_mask(h
);
5525 bool new_page
, new_pagecache_page
= false;
5528 * Currently, we are forced to kill the process in the event the
5529 * original mapper has unmapped pages from the child due to a failed
5530 * COW/unsharing. Warn that such a situation has occurred as it may not
5533 if (is_vma_resv_set(vma
, HPAGE_RESV_UNMAPPED
)) {
5534 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5540 * We can not race with truncation due to holding i_mmap_rwsem.
5541 * i_size is modified when holding i_mmap_rwsem, so check here
5542 * once for faults beyond end of file.
5544 size
= i_size_read(mapping
->host
) >> huge_page_shift(h
);
5550 page
= find_lock_page(mapping
, idx
);
5552 /* Check for page in userfault range */
5553 if (userfaultfd_missing(vma
)) {
5554 ret
= hugetlb_handle_userfault(vma
, mapping
, idx
,
5555 flags
, haddr
, address
,
5560 page
= alloc_huge_page(vma
, haddr
, 0);
5563 * Returning error will result in faulting task being
5564 * sent SIGBUS. The hugetlb fault mutex prevents two
5565 * tasks from racing to fault in the same page which
5566 * could result in false unable to allocate errors.
5567 * Page migration does not take the fault mutex, but
5568 * does a clear then write of pte's under page table
5569 * lock. Page fault code could race with migration,
5570 * notice the clear pte and try to allocate a page
5571 * here. Before returning error, get ptl and make
5572 * sure there really is no pte entry.
5574 ptl
= huge_pte_lock(h
, mm
, ptep
);
5576 if (huge_pte_none(huge_ptep_get(ptep
)))
5577 ret
= vmf_error(PTR_ERR(page
));
5581 clear_huge_page(page
, address
, pages_per_huge_page(h
));
5582 __SetPageUptodate(page
);
5585 if (vma
->vm_flags
& VM_MAYSHARE
) {
5586 int err
= huge_add_to_page_cache(page
, mapping
, idx
);
5593 new_pagecache_page
= true;
5596 if (unlikely(anon_vma_prepare(vma
))) {
5598 goto backout_unlocked
;
5604 * If memory error occurs between mmap() and fault, some process
5605 * don't have hwpoisoned swap entry for errored virtual address.
5606 * So we need to block hugepage fault by PG_hwpoison bit check.
5608 if (unlikely(PageHWPoison(page
))) {
5609 ret
= VM_FAULT_HWPOISON_LARGE
|
5610 VM_FAULT_SET_HINDEX(hstate_index(h
));
5611 goto backout_unlocked
;
5614 /* Check for page in userfault range. */
5615 if (userfaultfd_minor(vma
)) {
5618 ret
= hugetlb_handle_userfault(vma
, mapping
, idx
,
5619 flags
, haddr
, address
,
5626 * If we are going to COW a private mapping later, we examine the
5627 * pending reservations for this page now. This will ensure that
5628 * any allocations necessary to record that reservation occur outside
5631 if ((flags
& FAULT_FLAG_WRITE
) && !(vma
->vm_flags
& VM_SHARED
)) {
5632 if (vma_needs_reservation(h
, vma
, haddr
) < 0) {
5634 goto backout_unlocked
;
5636 /* Just decrements count, does not deallocate */
5637 vma_end_reservation(h
, vma
, haddr
);
5640 ptl
= huge_pte_lock(h
, mm
, ptep
);
5642 /* If pte changed from under us, retry */
5643 if (!pte_same(huge_ptep_get(ptep
), old_pte
))
5647 ClearHPageRestoreReserve(page
);
5648 hugepage_add_new_anon_rmap(page
, vma
, haddr
);
5650 page_dup_file_rmap(page
, true);
5651 new_pte
= make_huge_pte(vma
, page
, ((vma
->vm_flags
& VM_WRITE
)
5652 && (vma
->vm_flags
& VM_SHARED
)));
5654 * If this pte was previously wr-protected, keep it wr-protected even
5657 if (unlikely(pte_marker_uffd_wp(old_pte
)))
5658 new_pte
= huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte
));
5659 set_huge_pte_at(mm
, haddr
, ptep
, new_pte
);
5661 hugetlb_count_add(pages_per_huge_page(h
), mm
);
5662 if ((flags
& FAULT_FLAG_WRITE
) && !(vma
->vm_flags
& VM_SHARED
)) {
5663 /* Optimization, do the COW without a second fault */
5664 ret
= hugetlb_wp(mm
, vma
, address
, ptep
, flags
, page
, ptl
);
5670 * Only set HPageMigratable in newly allocated pages. Existing pages
5671 * found in the pagecache may not have HPageMigratableset if they have
5672 * been isolated for migration.
5675 SetHPageMigratable(page
);
5685 /* restore reserve for newly allocated pages not in page cache */
5686 if (new_page
&& !new_pagecache_page
)
5687 restore_reserve_on_error(h
, vma
, haddr
, page
);
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	unsigned long key[2];
	u32 hash;

	key[0] = (unsigned long) mapping;
	key[1] = idx;

	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
{
	return 0;
}
#endif
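/*
 * Typical usage (as in hugetlb_fault() below): compute the hash from the
 * mapping and the file index, then serialize on the corresponding entry of
 * hugetlb_fault_mutex_table:
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 */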
5716 vm_fault_t
hugetlb_fault(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
5717 unsigned long address
, unsigned int flags
)
5724 struct page
*page
= NULL
;
5725 struct page
*pagecache_page
= NULL
;
5726 struct hstate
*h
= hstate_vma(vma
);
5727 struct address_space
*mapping
;
5728 int need_wait_lock
= 0;
5729 unsigned long haddr
= address
& huge_page_mask(h
);
5731 ptep
= huge_pte_offset(mm
, haddr
, huge_page_size(h
));
5734 * Since we hold no locks, ptep could be stale. That is
5735 * OK as we are only making decisions based on content and
5736 * not actually modifying content here.
5738 entry
= huge_ptep_get(ptep
);
5739 if (unlikely(is_hugetlb_entry_migration(entry
))) {
5740 migration_entry_wait_huge(vma
, ptep
);
5742 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry
)))
5743 return VM_FAULT_HWPOISON_LARGE
|
5744 VM_FAULT_SET_HINDEX(hstate_index(h
));
5748 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
5749 * until finished with ptep. This serves two purposes:
5750 * 1) It prevents huge_pmd_unshare from being called elsewhere
5751 * and making the ptep no longer valid.
5752 * 2) It synchronizes us with i_size modifications during truncation.
5754 * ptep could have already be assigned via huge_pte_offset. That
5755 * is OK, as huge_pte_alloc will return the same value unless
5756 * something has changed.
5758 mapping
= vma
->vm_file
->f_mapping
;
5759 i_mmap_lock_read(mapping
);
5760 ptep
= huge_pte_alloc(mm
, vma
, haddr
, huge_page_size(h
));
5762 i_mmap_unlock_read(mapping
);
5763 return VM_FAULT_OOM
;
5767 * Serialize hugepage allocation and instantiation, so that we don't
5768 * get spurious allocation failures if two CPUs race to instantiate
5769 * the same page in the page cache.
5771 idx
= vma_hugecache_offset(h
, vma
, haddr
);
5772 hash
= hugetlb_fault_mutex_hash(mapping
, idx
);
5773 mutex_lock(&hugetlb_fault_mutex_table
[hash
]);
5775 entry
= huge_ptep_get(ptep
);
5776 /* PTE markers should be handled the same way as none pte */
5777 if (huge_pte_none_mostly(entry
)) {
5778 ret
= hugetlb_no_page(mm
, vma
, mapping
, idx
, address
, ptep
,
5786 * entry could be a migration/hwpoison entry at this point, so this
5787 * check prevents the kernel from going below assuming that we have
5788 * an active hugepage in pagecache. This goto expects the 2nd page
5789 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
5790 * properly handle it.
5792 if (!pte_present(entry
))
5796 * If we are going to COW/unshare the mapping later, we examine the
5797 * pending reservations for this page now. This will ensure that any
5798 * allocations necessary to record that reservation occur outside the
5799 * spinlock. Also lookup the pagecache page now as it is used to
5800 * determine if a reservation has been consumed.
5802 if ((flags
& (FAULT_FLAG_WRITE
|FAULT_FLAG_UNSHARE
)) &&
5803 !(vma
->vm_flags
& VM_MAYSHARE
) && !huge_pte_write(entry
)) {
5804 if (vma_needs_reservation(h
, vma
, haddr
) < 0) {
5808 /* Just decrements count, does not deallocate */
5809 vma_end_reservation(h
, vma
, haddr
);
5811 pagecache_page
= hugetlbfs_pagecache_page(h
, vma
, haddr
);
5814 ptl
= huge_pte_lock(h
, mm
, ptep
);
5816 /* Check for a racing update before calling hugetlb_wp() */
5817 if (unlikely(!pte_same(entry
, huge_ptep_get(ptep
))))
5820 /* Handle userfault-wp first, before trying to lock more pages */
5821 if (userfaultfd_wp(vma
) && huge_pte_uffd_wp(huge_ptep_get(ptep
)) &&
5822 (flags
& FAULT_FLAG_WRITE
) && !huge_pte_write(entry
)) {
5823 struct vm_fault vmf
= {
5826 .real_address
= address
,
5831 if (pagecache_page
) {
5832 unlock_page(pagecache_page
);
5833 put_page(pagecache_page
);
5835 mutex_unlock(&hugetlb_fault_mutex_table
[hash
]);
5836 i_mmap_unlock_read(mapping
);
5837 return handle_userfault(&vmf
, VM_UFFD_WP
);
5841 * hugetlb_wp() requires page locks of pte_page(entry) and
5842 * pagecache_page, so here we need take the former one
5843 * when page != pagecache_page or !pagecache_page.
5845 page
= pte_page(entry
);
5846 if (page
!= pagecache_page
)
5847 if (!trylock_page(page
)) {
5854 if (flags
& (FAULT_FLAG_WRITE
|FAULT_FLAG_UNSHARE
)) {
5855 if (!huge_pte_write(entry
)) {
5856 ret
= hugetlb_wp(mm
, vma
, address
, ptep
, flags
,
5857 pagecache_page
, ptl
);
5859 } else if (likely(flags
& FAULT_FLAG_WRITE
)) {
5860 entry
= huge_pte_mkdirty(entry
);
5863 entry
= pte_mkyoung(entry
);
5864 if (huge_ptep_set_access_flags(vma
, haddr
, ptep
, entry
,
5865 flags
& FAULT_FLAG_WRITE
))
5866 update_mmu_cache(vma
, haddr
, ptep
);
5868 if (page
!= pagecache_page
)
5874 if (pagecache_page
) {
5875 unlock_page(pagecache_page
);
5876 put_page(pagecache_page
);
5879 mutex_unlock(&hugetlb_fault_mutex_table
[hash
]);
5880 i_mmap_unlock_read(mapping
);
5882 * Generally it's safe to hold refcount during waiting page lock. But
5883 * here we just wait to defer the next page fault to avoid busy loop and
5884 * the page is not used after unlocked before returning from the current
5885 * page fault. So we are safe from accessing freed page, even if we wait
5886 * here without taking refcount.
5889 wait_on_page_locked(page
);
5893 #ifdef CONFIG_USERFAULTFD
5895 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
5896 * modifications for huge pages.
5898 int hugetlb_mcopy_atomic_pte(struct mm_struct
*dst_mm
,
5900 struct vm_area_struct
*dst_vma
,
5901 unsigned long dst_addr
,
5902 unsigned long src_addr
,
5903 enum mcopy_atomic_mode mode
,
5904 struct page
**pagep
,
5907 bool is_continue
= (mode
== MCOPY_ATOMIC_CONTINUE
);
5908 struct hstate
*h
= hstate_vma(dst_vma
);
5909 struct address_space
*mapping
= dst_vma
->vm_file
->f_mapping
;
5910 pgoff_t idx
= vma_hugecache_offset(h
, dst_vma
, dst_addr
);
5912 int vm_shared
= dst_vma
->vm_flags
& VM_SHARED
;
5918 bool page_in_pagecache
= false;
5922 page
= find_lock_page(mapping
, idx
);
5925 page_in_pagecache
= true;
5926 } else if (!*pagep
) {
5927 /* If a page already exists, then it's UFFDIO_COPY for
5928 * a non-missing case. Return -EEXIST.
5931 hugetlbfs_pagecache_present(h
, dst_vma
, dst_addr
)) {
5936 page
= alloc_huge_page(dst_vma
, dst_addr
, 0);
5942 ret
= copy_huge_page_from_user(page
,
5943 (const void __user
*) src_addr
,
5944 pages_per_huge_page(h
), false);
5946 /* fallback to copy_from_user outside mmap_lock */
5947 if (unlikely(ret
)) {
5949 /* Free the allocated page which may have
5950 * consumed a reservation.
5952 restore_reserve_on_error(h
, dst_vma
, dst_addr
, page
);
5955 /* Allocate a temporary page to hold the copied
5958 page
= alloc_huge_page_vma(h
, dst_vma
, dst_addr
);
5964 /* Set the outparam pagep and return to the caller to
5965 * copy the contents outside the lock. Don't free the
5972 hugetlbfs_pagecache_present(h
, dst_vma
, dst_addr
)) {
5979 page
= alloc_huge_page(dst_vma
, dst_addr
, 0);
5986 copy_user_huge_page(page
, *pagep
, dst_addr
, dst_vma
,
5987 pages_per_huge_page(h
));
5993 * The memory barrier inside __SetPageUptodate makes sure that
5994 * preceding stores to the page contents become visible before
5995 * the set_pte_at() write.
5997 __SetPageUptodate(page
);
5999 /* Add shared, newly allocated pages to the page cache. */
6000 if (vm_shared
&& !is_continue
) {
6001 size
= i_size_read(mapping
->host
) >> huge_page_shift(h
);
6004 goto out_release_nounlock
;
6007 * Serialization between remove_inode_hugepages() and
6008 * huge_add_to_page_cache() below happens through the
6009 * hugetlb_fault_mutex_table that here must be hold by
6012 ret
= huge_add_to_page_cache(page
, mapping
, idx
);
6014 goto out_release_nounlock
;
6015 page_in_pagecache
= true;
6018 ptl
= huge_pte_lockptr(h
, dst_mm
, dst_pte
);
6022 * Recheck the i_size after holding PT lock to make sure not
6023 * to leave any page mapped (as page_mapped()) beyond the end
6024 * of the i_size (remove_inode_hugepages() is strict about
6025 * enforcing that). If we bail out here, we'll also leave a
6026 * page in the radix tree in the vm_shared case beyond the end
6027 * of the i_size, but remove_inode_hugepages() will take care
6028 * of it as soon as we drop the hugetlb_fault_mutex_table.
6030 size
= i_size_read(mapping
->host
) >> huge_page_shift(h
);
6033 goto out_release_unlock
;
6037 * We allow to overwrite a pte marker: consider when both MISSING|WP
6038 * registered, we firstly wr-protect a none pte which has no page cache
6039 * page backing it, then access the page.
6041 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte
)))
6042 goto out_release_unlock
;
6044 if (page_in_pagecache
) {
6045 page_dup_file_rmap(page
, true);
6047 ClearHPageRestoreReserve(page
);
6048 hugepage_add_new_anon_rmap(page
, dst_vma
, dst_addr
);
6052 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6053 * with wp flag set, don't set pte write bit.
6055 if (wp_copy
|| (is_continue
&& !vm_shared
))
6058 writable
= dst_vma
->vm_flags
& VM_WRITE
;
6060 _dst_pte
= make_huge_pte(dst_vma
, page
, writable
);
6062 * Always mark UFFDIO_COPY page dirty; note that this may not be
6063 * extremely important for hugetlbfs for now since swapping is not
6064 * supported, but we should still be clear in that this page cannot be
6065 * thrown away at will, even if write bit not set.
6067 _dst_pte
= huge_pte_mkdirty(_dst_pte
);
6068 _dst_pte
= pte_mkyoung(_dst_pte
);
6071 _dst_pte
= huge_pte_mkuffd_wp(_dst_pte
);
6073 set_huge_pte_at(dst_mm
, dst_addr
, dst_pte
, _dst_pte
);
6075 hugetlb_count_add(pages_per_huge_page(h
), dst_mm
);
6077 /* No need to invalidate - it was non-present before */
6078 update_mmu_cache(dst_vma
, dst_addr
, dst_pte
);
6082 SetHPageMigratable(page
);
6083 if (vm_shared
|| is_continue
)
6090 if (vm_shared
|| is_continue
)
6092 out_release_nounlock
:
6093 if (!page_in_pagecache
)
6094 restore_reserve_on_error(h
, dst_vma
, dst_addr
, page
);
6098 #endif /* CONFIG_USERFAULTFD */
static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
				 int refs, struct page **pages,
				 struct vm_area_struct **vmas)
{
	int nr;

	for (nr = 0; nr < refs; nr++) {
		if (likely(pages))
			pages[nr] = mem_map_offset(page, nr);
		if (vmas)
			vmas[nr] = vma;
	}
}
static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte,
					       bool *unshare)
{
	pte_t pteval = huge_ptep_get(pte);

	*unshare = false;
	if (is_swap_pte(pteval))
		return true;
	if (huge_pte_write(pteval))
		return false;
	if (flags & FOLL_WRITE)
		return true;
	if (gup_must_unshare(flags, pte_page(pteval))) {
		*unshare = true;
		return true;
	}
	return false;
}
6133 long follow_hugetlb_page(struct mm_struct
*mm
, struct vm_area_struct
*vma
,
6134 struct page
**pages
, struct vm_area_struct
**vmas
,
6135 unsigned long *position
, unsigned long *nr_pages
,
6136 long i
, unsigned int flags
, int *locked
)
6138 unsigned long pfn_offset
;
6139 unsigned long vaddr
= *position
;
6140 unsigned long remainder
= *nr_pages
;
6141 struct hstate
*h
= hstate_vma(vma
);
6142 int err
= -EFAULT
, refs
;
6144 while (vaddr
< vma
->vm_end
&& remainder
) {
6146 spinlock_t
*ptl
= NULL
;
6147 bool unshare
= false;
6152 * If we have a pending SIGKILL, don't keep faulting pages and
6153 * potentially allocating memory.
6155 if (fatal_signal_pending(current
)) {
6161 * Some archs (sparc64, sh*) have multiple pte_ts to
6162 * each hugepage. We have to make sure we get the
6163 * first, for the page indexing below to work.
6165 * Note that page table lock is not held when pte is null.
6167 pte
= huge_pte_offset(mm
, vaddr
& huge_page_mask(h
),
6170 ptl
= huge_pte_lock(h
, mm
, pte
);
6171 absent
= !pte
|| huge_pte_none(huge_ptep_get(pte
));
6174 * When coredumping, it suits get_dump_page if we just return
6175 * an error where there's an empty slot with no huge pagecache
6176 * to back it. This way, we avoid allocating a hugepage, and
6177 * the sparse dumpfile avoids allocating disk blocks, but its
6178 * huge holes still show up with zeroes where they need to be.
6180 if (absent
&& (flags
& FOLL_DUMP
) &&
6181 !hugetlbfs_pagecache_present(h
, vma
, vaddr
)) {
6189 * We need call hugetlb_fault for both hugepages under migration
6190 * (in which case hugetlb_fault waits for the migration,) and
6191 * hwpoisoned hugepages (in which case we need to prevent the
6192 * caller from accessing to them.) In order to do this, we use
6193 * here is_swap_pte instead of is_hugetlb_entry_migration and
6194 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
6195 * both cases, and because we can't follow correct pages
6196 * directly from any kind of swap entries.
6199 __follow_hugetlb_must_fault(flags
, pte
, &unshare
)) {
6201 unsigned int fault_flags
= 0;
6205 if (flags
& FOLL_WRITE
)
6206 fault_flags
|= FAULT_FLAG_WRITE
;
6208 fault_flags
|= FAULT_FLAG_UNSHARE
;
6210 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
|
6211 FAULT_FLAG_KILLABLE
;
6212 if (flags
& FOLL_NOWAIT
)
6213 fault_flags
|= FAULT_FLAG_ALLOW_RETRY
|
6214 FAULT_FLAG_RETRY_NOWAIT
;
6215 if (flags
& FOLL_TRIED
) {
6217 * Note: FAULT_FLAG_ALLOW_RETRY and
6218 * FAULT_FLAG_TRIED can co-exist
6220 fault_flags
|= FAULT_FLAG_TRIED
;
6222 ret
= hugetlb_fault(mm
, vma
, vaddr
, fault_flags
);
6223 if (ret
& VM_FAULT_ERROR
) {
6224 err
= vm_fault_to_errno(ret
, flags
);
6228 if (ret
& VM_FAULT_RETRY
) {
6230 !(fault_flags
& FAULT_FLAG_RETRY_NOWAIT
))
6234 * VM_FAULT_RETRY must not return an
6235 * error, it will return zero
6238 * No need to update "position" as the
6239 * caller will not check it after
6240 * *nr_pages is set to 0.
6247 pfn_offset
= (vaddr
& ~huge_page_mask(h
)) >> PAGE_SHIFT
;
6248 page
= pte_page(huge_ptep_get(pte
));
6250 VM_BUG_ON_PAGE((flags
& FOLL_PIN
) && PageAnon(page
) &&
6251 !PageAnonExclusive(page
), page
);
6254 * If subpage information not requested, update counters
6255 * and skip the same_page loop below.
6257 if (!pages
&& !vmas
&& !pfn_offset
&&
6258 (vaddr
+ huge_page_size(h
) < vma
->vm_end
) &&
6259 (remainder
>= pages_per_huge_page(h
))) {
6260 vaddr
+= huge_page_size(h
);
6261 remainder
-= pages_per_huge_page(h
);
6262 i
+= pages_per_huge_page(h
);
6267 /* vaddr may not be aligned to PAGE_SIZE */
6268 refs
= min3(pages_per_huge_page(h
) - pfn_offset
, remainder
,
6269 (vma
->vm_end
- ALIGN_DOWN(vaddr
, PAGE_SIZE
)) >> PAGE_SHIFT
);
6272 record_subpages_vmas(mem_map_offset(page
, pfn_offset
),
6274 likely(pages
) ? pages
+ i
: NULL
,
6275 vmas
? vmas
+ i
: NULL
);
6279 * try_grab_folio() should always succeed here,
6280 * because: a) we hold the ptl lock, and b) we've just
6281 * checked that the huge page is present in the page
6282 * tables. If the huge page is present, then the tail
6283 * pages must also be present. The ptl prevents the
6284 * head page and tail pages from being rearranged in
6285 * any way. So this page must be available at this
6286 * point, unless the page refcount overflowed:
6288 if (WARN_ON_ONCE(!try_grab_folio(pages
[i
], refs
,
6297 vaddr
+= (refs
<< PAGE_SHIFT
);
6303 *nr_pages
= remainder
;
6305 * setting position is actually required only if remainder is
6306 * not zero but it's faster not to add a "if (remainder)"
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0, psize = huge_page_size(h);
	bool shared_pmd = false;
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end. Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, vma, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);

	mmu_notifier_invalidate_range_start(&range);
	last_addr_mask = hugetlb_mask_last_page(h);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += psize) {
		spinlock_t *ptl;

		ptep = huge_pte_offset(mm, address, psize);
		if (!ptep) {
			address |= last_addr_mask;
			continue;
		}
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, vma, address, ptep)) {
			/*
			 * When uffd-wp is enabled on the vma, unshare
			 * shouldn't happen at all.  Warn about it if it
			 * happened due to some reason.
			 */
			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			address |= last_addr_mask;
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);
			struct page *page = pfn_swap_entry_to_page(entry);

			if (!is_readable_migration_entry(entry)) {
				pte_t newpte;

				if (PageAnon(page))
					entry = make_readable_exclusive_migration_entry(
								swp_offset(entry));
				else
					entry = make_readable_migration_entry(
								swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				if (uffd_wp)
					newpte = pte_swp_mkuffd_wp(newpte);
				else if (uffd_wp_resolve)
					newpte = pte_swp_clear_uffd_wp(newpte);
				set_huge_pte_at(mm, address, ptep, newpte);
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(pte_marker_uffd_wp(pte))) {
			/*
			 * This is changing a non-present pte into a none pte,
			 * no need for huge_ptep_modify_prot_start/commit().
			 */
			if (uffd_wp_resolve)
				huge_pte_clear(mm, address, ptep, psize);
		}
		if (!huge_pte_none(pte)) {
			pte_t old_pte;
			unsigned int shift = huge_page_shift(hstate_vma(vma));

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = huge_pte_modify(old_pte, newprot);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (uffd_wp)
				pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte));
			else if (uffd_wp_resolve)
				pte = huge_pte_clear_uffd_wp(pte);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
		} else {
			/* None pte */
			if (unlikely(uffd_wp))
				/* Safe to modify directly (none->non-present). */
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP));
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.  If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, range.start, range.end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_invalidate_range() we are downgrading
	 * page table protection not changing it to point to a new page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(&range);

	return pages << h->order;
}
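
/*
 * Illustrative note (not from the original source): the value returned above
 * is expressed in base (PAGE_SIZE) pages, which is what callers such as
 * change_protection() accumulate.  With 2 MiB huge pages (h->order == 9),
 * updating three huge PTEs therefore reports 3 << 9 == 1536 pages.
 */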
/* Return true if reservation was successful, false otherwise.  */
bool hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long chg = -1, add = -1;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return false;
	}

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return true;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);
	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map)
			return false;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0)
		goto out_err;

	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg) < 0)
		goto out_err;

	if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * off the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0)
		goto out_uncharge_cgroup;

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return true;

out_put_pages:
	/* put back original number of pages, chg */
	(void)hugepage_subpool_put_pages(spool, chg);
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return false;
}
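
/*
 * Illustrative sketch (hypothetical, not from the original source): how a
 * hugetlbfs-style ->mmap() handler might call hugetlb_reserve_pages() at map
 * time, loosely modelled on hugetlbfs_file_mmap() in fs/hugetlbfs/inode.c.
 * The name my_hugetlb_file_mmap() is made up, and the real caller also
 * performs alignment and inode-size checks that are omitted here.
 *
 *	static int my_hugetlb_file_mmap(struct file *file,
 *					struct vm_area_struct *vma)
 *	{
 *		struct inode *inode = file_inode(file);
 *		struct hstate *h = hstate_inode(inode);
 *		loff_t len = vma->vm_end - vma->vm_start +
 *			     ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
 *
 *		// from/to are in huge pages, counted from file offset 0
 *		if (!hugetlb_reserve_pages(inode,
 *					   vma->vm_pgoff >> huge_page_order(h),
 *					   len >> huge_page_shift(h),
 *					   vma, vma->vm_flags))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */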
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
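
/*
 * Illustrative note (not from the original source): (chg - freed) counts
 * reservations that were removed from the reserve map but never consumed.
 * For example, if a truncate or hole punch deletes a 10-huge-page region
 * from the reserve map (chg == 10) while only 6 of those pages were actually
 * instantiated in the page cache and freed (freed == 6), the 4 unused
 * reservations are handed back to the subpool, and the resulting gbl_reserve
 * is then subtracted from the global reservation count.
 */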
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    !range_in_vma(svma, sbase, s_end))
		return 0;

	return saddr;
}
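
/*
 * Worked example (assuming x86-64 with PUD_SIZE == 1 GiB and 4 KiB base
 * pages; not from the original source): suppose the faulting VMA maps file
 * offset 0 at vm_start == 0x7f0040000000 and the fault address is
 * 0x7f0040200000, so idx == 512.  A second VMA of the same file, also with
 * vm_pgoff == 0, starts at 0x560000000000.  Then
 *
 *	saddr = (512 << PAGE_SHIFT) + 0x560000000000 = 0x560000200000
 *
 * i.e. the sibling-mapping address of the same file page.  Sharing is only
 * attempted when the 1 GiB window [sbase, s_end) around saddr lies entirely
 * inside that sibling VMA and both VMAs agree on flags and PMD index.
 */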
static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
		return true;
	return false;
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_USERFAULTFD
	if (uffd_disable_huge_pmd_share(vma))
		return false;
#endif
	return vma_shareable(vma, addr);
}
/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	/*
	 * vma needs to span at least one aligned PUD size, and the range
	 * must be at least partially within it.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
		(*end <= v_start) || (*start >= v_end))
		return;

	/* Extend the range to be PUD aligned for a worst case scenario */
	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);

	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}
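
/*
 * Worked example (assuming PUD_SIZE == 1 GiB; not from the original source):
 * for a shared VMA spanning [1 GiB, 4 GiB), a request covering
 * [2 GiB + 4 MiB, 2 GiB + 8 MiB) is widened to [2 GiB, 3 GiB), the full
 * PUD-sized window a shared PMD page could map.  Callers such as
 * hugetlb_change_protection() use the widened range only for cache/TLB
 * flushing and mmu notifiers; their per-entry loops still walk the original
 * start/end.
 */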
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner.
 *
 * This routine must be called with i_mmap_rwsem held in at least read mode if
 * sharing is possible.  For hugetlbfs, this prevents removal of any page
 * table entries associated with the address space.  This is important as we
 * are setting up sharing based on existing page table entries (mappings).
 */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	i_mmap_assert_locked(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		put_page(virt_to_page(spte));
	}
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	return pte;
}
/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * Called with page table lock held and i_mmap_rwsem held in write mode.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}
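
/*
 * Illustrative note (not from the original source): the "sharing count" is
 * simply the refcount of the PMD page table page.  If three processes map
 * through the same shared PMD page, page_count() of that page is 3; each
 * huge_pmd_unshare() clears the caller's PUD entry and drops one reference
 * (returning 1), and once only a single user is left the page is treated as
 * an ordinary, private page table (the function then returns 0).
 */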
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	return 0;
}

void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}
/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}
/*
 * Return a mask that can be used to update an address to the last huge
 * page in a page table page mapping size.  Used to skip non-present
 * page table entries when linearly scanning address ranges.  Architectures
 * with unique huge page to page table relationships can define their own
 * version of this routine.
 */
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	if (hp_size == PUD_SIZE)
		return P4D_SIZE - PUD_SIZE;
	else if (hp_size == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
	else
		return 0UL;
}

#else

/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
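
/*
 * Worked example (assuming x86-64 with 2 MiB huge pages; not from the
 * original source): hugetlb_mask_last_page() returns
 * PUD_SIZE - PMD_SIZE == 0x40000000 - 0x200000 == 0x3fe00000.  When a scan
 * such as the loop in hugetlb_change_protection() finds no page table at
 * 'address' (huge_pte_offset() returned NULL), it executes
 *
 *	address |= last_addr_mask;
 *
 * which moves 'address' to the last 2 MiB slot of the current 1 GiB region,
 * so the loop's "address += psize" step then jumps straight to the next PUD
 * entry instead of probing all 512 empty PMD slots individually.
 */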
/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
	return NULL;
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;

	/*
	 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via
	 * follow_hugetlb_page().
	 */
	if (WARN_ON_ONCE(flags & FOLL_PIN))
		return NULL;

retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	pte = huge_ptep_get((pte_t *)pmd);
	if (pte_present(pte)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		/*
		 * try_grab_page() should always succeed here, because: a) we
		 * hold the pmd (ptl) lock, and b) we've just checked that the
		 * huge pmd (head) page is present in the page tables. The ptl
		 * prevents the head page and tail pages from being rearranged
		 * in any way. So this page must be available at this point,
		 * unless the page refcount overflowed:
		 */
		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
			page = NULL;
			goto out;
		}
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait_huge((pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;

	if (WARN_ON_ONCE(flags & FOLL_PIN))
		return NULL;

retry:
	ptl = huge_pte_lock(hstate_sizelog(PUD_SHIFT), mm, (pte_t *)pud);
	if (!pud_huge(*pud))
		goto out;
	pte = huge_ptep_get((pte_t *)pud);
	if (pte_present(pte)) {
		page = pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
			page = NULL;
			goto out;
		}
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pud, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
	if (flags & (FOLL_GET | FOLL_PIN))
		return NULL;

	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}
int isolate_hugetlb(struct page *page, struct list_head *list)
{
	int ret = 0;

	spin_lock_irq(&hugetlb_lock);
	if (!PageHeadHuge(page) ||
	    !HPageMigratable(page) ||
	    !get_page_unless_zero(page)) {
		ret = -EBUSY;
		goto unlock;
	}
	ClearHPageMigratable(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (PageHeadHuge(page)) {
		*hugetlb = true;
		if (HPageFreed(page))
			ret = 0;
		else if (HPageMigratable(page))
			ret = get_page_unless_zero(page);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}

void putback_active_hugepage(struct page *page)
{
	spin_lock_irq(&hugetlb_lock);
	SetHPageMigratable(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	put_page(page);
}

void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{
	struct hstate *h = page_hstate(oldpage);

	hugetlb_cgroup_migrate(oldpage, newpage);
	set_page_owner_migrate_reason(newpage, reason);

	/*
	 * transfer temporary state of the new huge page. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (HPageTemporary(newpage)) {
		int old_nid = page_to_nid(oldpage);
		int new_nid = page_to_nid(newpage);

		SetHPageTemporary(oldpage);
		ClearHPageTemporary(newpage);

		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}
}
/*
 * This function will unconditionally remove all the shared pmd pgtable entries
 * within the specific vma for a hugetlbfs memory range.
 */
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	unsigned long address, start, end;
	spinlock_t *ptl;
	pte_t *ptep;

	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	start = ALIGN(vma->vm_start, PUD_SIZE);
	end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);

	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = huge_pte_offset(mm, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(mm, vma, address, ptep);
		spin_unlock(ptl);
	}
	flush_hugetlb_tlb_range(vma, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	/*
	 * No need to call mmu_notifier_invalidate_range(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
}
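
/*
 * Illustrative note (assuming PUD_SIZE == 1 GiB; not from the original
 * source): only the PUD-aligned interior of the VMA can be backed by shared
 * PMD tables, which is why start/end are rounded inward above.  A VMA
 * covering [1.5 GiB, 4.5 GiB) is trimmed to [2 GiB, 4 GiB); a VMA smaller
 * than one aligned gigabyte yields an empty range and the function returns
 * without touching any page tables.
 */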
#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			hugetlb_cma_size_in_node[nid] = tmp;
			hugetlb_cma_size += tmp;

			/*
			 * Skip the separator if have one, otherwise
			 * break the parsing.
			 */
			if (*s == ',')
				s++;
			else
				break;
		} else {
			hugetlb_cma_size = memparse(p, &p);
			break;
		}
	}

	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	bool node_specific_cma_alloc = false;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	for (nid = 0; nid < MAX_NUMNODES; nid++) {
		if (hugetlb_cma_size_in_node[nid] == 0)
			continue;

		if (!node_online(nid)) {
			pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
			continue;
		}

		if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
			pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
				nid, (PAGE_SIZE << order) / SZ_1M);
			hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
			hugetlb_cma_size_in_node[nid] = 0;
		} else {
			node_specific_cma_alloc = true;
		}
	}

	/* Validate the CMA size again in case some invalid nodes specified. */
	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		hugetlb_cma_size = 0;
		return;
	}

	if (!node_specific_cma_alloc) {
		/*
		 * If 3 GB area is requested on a machine with 4 numa nodes,
		 * let's allocate 1 GB on first three nodes and ignore the last one.
		 */
		per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
		pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
			hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
	}

	reserved = 0;
	for_each_online_node(nid) {
		int res;
		char name[CMA_MAX_NAME];

		if (node_specific_cma_alloc) {
			if (hugetlb_cma_size_in_node[nid] == 0)
				continue;

			size = hugetlb_cma_size_in_node[nid];
		} else {
			size = min(per_node, hugetlb_cma_size - reserved);
		}

		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		/*
		 * Note that 'order per bit' is based on smallest size that
		 * may be returned to CMA allocator in the case of
		 * huge page demotion.
		 */
		res = cma_declare_contiguous_nid(0, size, 0,
						PAGE_SIZE << HUGETLB_PAGE_ORDER,
						0, false, name,
						&hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}

	if (!reserved)
		/*
		 * hugetlb_cma_size is used to determine if allocations from
		 * cma are possible.  Set to zero if no cma regions are set up.
		 */
		hugetlb_cma_size = 0;
}
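
/*
 * Worked example (not from the original source): booting with
 * "hugetlb_cma=3G" on a machine with 4 online nodes (and no per-node sizes)
 * gives per_node = DIV_ROUND_UP(3G, 4) = 768 MiB.  After each reservation is
 * rounded up to a gigantic-page multiple (1 GiB here), nodes 0-2 each get a
 * 1 GiB CMA area and the loop stops once reserved >= 3 GiB, so node 3 gets
 * nothing, matching the comment inside the function.  Booting with
 * "hugetlb_cma=0:1G,2:2G" instead fills hugetlb_cma_size_in_node[] and skips
 * the even split entirely.
 */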
void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */