/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Fallback for architectures that do not provide hugepd_t: a dummy
 * type plus an is_hugepd() that always evaluates to false.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both alloced and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
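
/*
 * Example (illustrative, not taken from the kernel sources): with the
 * [from, to) convention above, the number of huge pages covered by a
 * resv_map's region list is the sum of (to - from), computed under
 * resv_map->lock:
 *
 *      struct file_region *rg;
 *      long pages = 0;
 *
 *      list_for_each_entry(rg, &resv->regions, link)
 *              pages += rg->to - rg->from;
 */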

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
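
/*
 * Example (illustrative): a resv_map is freed by dropping the kref
 * embedded in it:
 *
 *      kref_put(&resv->refs, resv_map_release);
 */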

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
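
/*
 * Example (illustrative): walking every registered huge page size:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */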

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                               loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                                  loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                                     loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             enum mcopy_atomic_mode mode,
                             struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_struct *vma,
                           vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
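
/*
 * Example (illustrative): faults on the same mapping/index are
 * serialized by hashing into hugetlb_fault_mutex_table:
 *
 *      u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */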

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                        struct vm_area_struct *vma,
                        unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *locked)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                        unsigned long address, hugepd_t hpd, int flags,
                        int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                        unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                        unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                        unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                        pte_t *dst_pte,
                        struct vm_area_struct *dst_vma,
                        unsigned long dst_addr,
                        unsigned long src_addr,
                        enum mcopy_atomic_mode mode,
                        struct page **pagep)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
        return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * Hugepages at page global directory. If an architecture supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time.  Cleared when page is fully instantiated.  Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization:  Examined or modified by code that knows it has
 *      the only reference to page.  i.e. After allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables.  Indicates the page is a candidate for
 *      migration.
 *      Synchronization:  Initially set after new page allocation with no
 *      locking.  When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator.  Typically used for migration target pages when no pages
 *      are available in the pool.  The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference.  All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
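
/*
 * Example (illustrative): HPAGEFLAG(Temporary, temporary) above expands
 * into HPageTemporary(), SetHPageTemporary() and ClearHPageTemporary(),
 * all operating on bit HPG_temporary of the head page's page.private:
 *
 *      SetHPageTemporary(page);
 *      if (HPageTemporary(page))
 *              ClearHPageTemporary(page);
 */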

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                             unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                      nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                           pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                              unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                            struct hugepage_subpool *subpool)
{
        set_page_private(hpage+1, (unsigned long)subpool);
}
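
/*
 * Example (illustrative): the subpool pointer lives in the first tail
 * page, keeping page.private of the head page free for the HPG_* flags
 * above:
 *
 *      hugetlb_set_page_subpool(page, spool);
 *      ...
 *      spool = hugetlb_page_subpool(page);
 */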

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
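
/*
 * Example (illustrative, assuming x86-64 with PAGE_SHIFT == 12): a
 * 2 MiB hstate has order 9, so huge_page_size() == 4096 << 9 == 2 MiB,
 * huge_page_shift() == 9 + 12 == 21, pages_per_huge_page() == 512 and
 * blocks_per_huge_page() == 2 MiB / 512 == 4096 sectors.
 */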

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;

        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}
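
/*
 * Example (illustrative): a caller that must allocate on a specific
 * node can pass __GFP_THISNODE through the hstate's base mask:
 *
 *      gfp_t gfp_mask = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *      struct page *page = alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
 */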

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
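
/*
 * Example (illustrative): a protection change brackets the PTE update
 * with the start/commit pair, mirroring ptep_modify_prot_start/commit:
 *
 *      pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *      pte_t pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *
 *      huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */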

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
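
/*
 * Example (illustrative): the usual lookup-then-lock pattern for a
 * huge PTE:
 *
 *      pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *
 *      if (ptep) {
 *              spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *              pte_t pte = huge_ptep_get(ptep);
 *              ...
 *              spin_unlock(ptl);
 *      }
 */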

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */