/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page needs more metadata than the head struct page can hold, so
 * we have to reuse fields of the tail struct pages to store it. To avoid
 * conflicts caused by subsequent use of more tail struct pages, the discrete
 * indexes of the tail struct pages used are gathered here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
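
/*
 * For example (an illustrative snippet mirroring the accessors defined later
 * in this header), the subpool pointer is read back from the first tail
 * page's ->private field:
 *
 *	struct hugepage_subpool *spool =
 *		(void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 */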

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map.  These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
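
/*
 * A minimal sketch (illustrative only, not part of the original header) of
 * the [from, to) arithmetic described above: counting the huge pages covered
 * by all regions of a resv_map, assuming the caller holds resv_map->lock:
 *
 *	struct file_region *rg;
 *	long npages = 0;
 *
 *	list_for_each_entry(rg, &resv_map->regions, link)
 *		npages += rg->to - rg->from;
 */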

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
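
/*
 * Usage sketch (illustrative only): for_each_hstate() walks every registered
 * huge page size, e.g. to report the free pages in each pool:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */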

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct ucounts **ucounts, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct ucounts **ucounts, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization:  Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
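
/*
 * Each HPAGEFLAG(uname, flname) line above expands into three helpers:
 * HPage##uname(), SetHPage##uname() and ClearHPage##uname(), all operating
 * on the head page's page->private bits.  For example (illustrative only),
 * code can mark a newly instantiated page as a migration candidate with:
 *
 *	if (!HPageMigratable(page))
 *		SetHPageMigratable(page);
 */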

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
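
/*
 * For example (illustrative): a page_size_log of 21 selects the 2 MB hstate
 * (when that size is registered), since 1UL << 21 == 2 MB; a value of 0
 * falls back to the default huge page size.
 */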

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
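
/*
 * Worked example (illustrative): for a 2 MB huge page with a 4 KB PAGE_SIZE,
 * h->order is 9, so huge_page_size() is 4096 << 9 = 2 MB,
 * pages_per_huge_page() is 1 << 9 = 512, and blocks_per_huge_page() is
 * 2 MB / 512 = 4096 sector-sized (512 byte) blocks.
 */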

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it determines
 * whether or not a huge page should be placed in a movable zone. Movability
 * only matters if the huge page size is supported for migration in the first
 * place; there is no reason for a huge page to be movable if it is not
 * migratable to start with. The huge page must also be small enough that
 * migrating it out of a movable zone remains feasible; mere presence in a
 * movable zone does not by itself make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are migratable,
 * they should not be movable, because it is not feasible to migrate them out
 * of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled	false
#endif

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
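
/*
 * A minimal usage sketch (illustrative only): huge_pte_lock() pairs with a
 * plain spin_unlock() of the returned lock, e.g. when examining a huge PTE:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t pte = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */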

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */