/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata. In order to avoid conflicts caused by
 * subsequent use of more tail struct pages, we gather these discrete indexes
 * of tail struct page here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
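
/*
 * For example, the subpool pointer of a hugetlb page is stashed in the
 * page.private field of the tail page at SUBPAGE_INDEX_SUBPOOL, i.e.
 * page_private(head + SUBPAGE_INDEX_SUBPOOL); see hugetlb_page_subpool()
 * and hugetlb_set_page_subpool() below for the accessors built on this
 * scheme.
 */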
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map.  These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
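
/*
 * Illustrative sketch (not from the original header): totaling the huge
 * pages covered by a resv_map's region list, using the [from, to)
 * convention above.  example_count_region_pages() is a hypothetical
 * helper and assumes the caller holds resv_map->lock:
 *
 *	static long example_count_region_pages(struct resv_map *resv)
 *	{
 *		struct file_region *rg;
 *		long pages = 0;
 *
 *		list_for_each_entry(rg, &resv->regions, link)
 *			pages += rg->to - rg->from;
 *		return pages;
 *	}
 */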
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
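
/*
 * Usage sketch (illustrative): for_each_hstate() walks every registered
 * huge page size, e.g.:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu byte pages\n",
 *			h->name, huge_page_size(h));
 */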
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
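
/*
 * Usage sketch (illustrative): fault paths serialize on a hashed mutex,
 * in the style of hugetlb_fault():
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...handle the fault...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */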
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};
/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
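
/*
 * The HPAGEFLAG() invocations above generate, for each flag, a test, set
 * and clear helper, e.g. HPageMigratable(), SetHPageMigratable() and
 * ClearHPageMigratable(), all operating on the head page's page.private
 * bits.  Illustrative use:
 *
 *	if (HPageTemporary(page))
 *		...;
 *	SetHPageFreed(page);
 *	ClearHPageFreed(page);
 */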
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
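
/*
 * Worked example (illustrative): with 4 KiB base pages (PAGE_SHIFT == 12),
 * a 2 MiB hstate has order 9, so huge_page_size() is 4096 << 9 == 2097152,
 * huge_page_shift() is 21, pages_per_huge_page() is 512 and
 * blocks_per_huge_page() is 2097152 / 512 == 4096 sectors.
 */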
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}
static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}
/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
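
/*
 * Usage sketch (illustrative assumption about caller patterns): a
 * migration-style caller derives its allocation mask from the hstate and
 * then narrows it with its own constraints, e.g.:
 *
 *	gfp_t gfp_mask = htlb_modify_alloc_mask(h,
 *				__GFP_THISNODE | __GFP_NOWARN);
 */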
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
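
/*
 * Usage sketch (illustrative): the two helpers above are used as a
 * start/commit pair when changing protection on a huge PTE, in the style
 * of hugetlb_change_protection():
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */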
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;

static inline bool is_hugetlb_free_vmemmap_enabled(void)
{
	return hugetlb_free_vmemmap_enabled;
}
#else
static inline bool is_hugetlb_free_vmemmap_enabled(void)
{
	return false;
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline bool is_hugetlb_free_vmemmap_enabled(void)
{
	return false;
}
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
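
/*
 * Usage sketch (illustrative): huge_pte_lock() pairs with spin_unlock()
 * around huge PTE updates:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	...examine or modify the huge PTE...
 *	spin_unlock(ptl);
 */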
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */