#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>
struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum; includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against the global pool to */
                                /* satisfy the minimum size. */
};
struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
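/*
 * Illustrative sketch (not part of the original header): walking every
 * registered huge page size, e.g. from a debug or statistics path:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */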
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
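/*
 * Illustrative sketch: a hugetlbfs-like user creates a subpool against an
 * hstate and releases it with hugepage_put_subpool(); the max/min page
 * counts below are made up for the example:
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = hugepage_new_subpool(&default_hstate, 16, 2);
 *      if (!spool)
 *              return -ENOMEM;
 *      ...
 *      hugepage_put_subpool(spool);
 */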
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
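/*
 * Illustrative sketch: memory offlining and migration callers isolate an
 * active huge page onto a private list before migrating it, and put it
 * back on failure; do_migration() is a hypothetical stand-in here:
 *
 *      LIST_HEAD(pagelist);
 *
 *      if (!isolate_huge_page(page, &pagelist))
 *              return -EBUSY;
 *      if (do_migration(&pagelist))
 *              putback_active_hugepage(page);
 */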
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
                                struct vm_area_struct *vma,
                                struct address_space *mapping,
                                pgoff_t idx, unsigned long address);
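/*
 * Illustrative sketch: fault handling paths serialize faults on the same
 * mapping/index by hashing into the fault mutex table, roughly:
 *
 *      u32 hash;
 *
 *      hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */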
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
#define follow_huge_pud(mm, addr, pud, flags)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
                                src_addr, pagep)        ({ BUG(); 0; })
#define huge_pte_offset(mm, address)    0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)      do {} while (0)
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}
#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif
#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
#endif
#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example, a4fe3ce76
 * "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing for more flexible hugepage
 * pagetable layouts.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                              unsigned pdshift, unsigned long end,
                              int write, struct page **pages, int *nr)
{
        return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                       unsigned pdshift, unsigned long end,
                       int write, struct page **pages, int *nr);
#endif
#define HUGETLB_ANON_FILE       "anon_hugepage"
enum {
        /*
         * The file will be used as a shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;     /* inodes allowed */
        long    free_inodes;    /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);
static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}
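/*
 * Illustrative sketch: mmap-style callers use this to special-case
 * hugetlbfs-backed files, e.g. rounding lengths to the huge page size:
 *
 *      if (is_file_hugepages(file))
 *              len = ALIGN(len, huge_page_size(hstate_file(file)));
 */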
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};
struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);
int __init alloc_bootmem_huge_page(struct hstate *h);
void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif
extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
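/*
 * Illustrative sketch: the default hstate backs mappings that do not ask
 * for an explicit page size:
 *
 *      struct hstate *h = &default_hstate;
 *
 *      pr_info("default huge page size: %lu kB\n", huge_page_size(h) >> 10);
 */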
static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}
static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}
static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}
static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}
static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}
static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        /* Number of 512-byte sectors per huge page. */
        return huge_page_size(h) / 512;
}
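/*
 * Worked example (values assumed for illustration): with 4 KiB base
 * pages and 2 MiB huge pages, h->order = 9, so
 *
 *      huge_page_size(h)       = 4096 << 9   = 2 MiB
 *      huge_page_shift(h)      = 9 + 12      = 21
 *      pages_per_huge_page(h)  = 1 << 9      = 512
 *      blocks_per_huge_page(h) = 2 MiB / 512 = 4096 sectors
 */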
#include <asm/hugetlb.h>
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif
static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}
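/*
 * Illustrative sketch: recovering the hstate, and from it the geometry,
 * of a page already known to be a huge page:
 *
 *      struct hstate *h = page_hstate(compound_head(page));
 *      unsigned long sz = huge_page_size(h);
 */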
static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}
static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}
pgoff_t __basepage_index(struct page *page);
/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
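/*
 * Illustrative note: for the third 4 KiB tail page of a 2 MiB huge page
 * that starts at file index 0, basepage_index() returns 3, i.e. the
 * offset expressed in base-page units rather than huge-page units.
 */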
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return false;
#endif
}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
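/*
 * Illustrative sketch: initialization paths bail out early when the
 * platform reports no huge page support at boot:
 *
 *      if (!hugepages_supported())
 *              return 0;
 */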
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}
#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0
static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)  0
#define hugepage_migration_supported(h) false
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
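/*
 * Illustrative sketch: callers pair huge_pte_lock() with spin_unlock()
 * around huge PTE inspection and updates:
 *
 *      spinlock_t *ptl;
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      ...
 *      spin_unlock(ptl);
 */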
#endif /* _LINUX_HUGETLB_H */