#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages, used_hpages;
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
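/*
 * Example use of for_each_hstate() (illustrative sketch only): walk every
 * registered huge page size, e.g. to report the free-page count per size.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */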
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                        void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif
extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                              pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                              pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */
static inline int PageHuge(struct page *page)
{
        return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false
static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}

static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
/*
 * Some architectures require a hugepage directory format that can
 * support multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced the same on powerpc. This allows for a more flexible hugepage
 * pagetable layout.
 */
#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                              unsigned pdshift, unsigned long end,
                              int write, struct page **pages, int *nr)
{
        return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                       unsigned pdshift, unsigned long end,
                       int write, struct page **pages, int *nr);
#endif
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file, so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount, and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long	max_inodes;   /* inodes allowed */
        long	free_inodes;  /* inodes free */
        spinlock_t	stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return 1;
        if (is_file_shm_hugepages(file))
                return 1;

        return 0;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);

int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
static inline struct hstate *hstate_inode(struct inode *i)
{
        struct hugetlbfs_sb_info *hsb;
        hsb = HUGETLBFS_SB(i->i_sb);
        return hsb->hstate;
}
static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}
static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}
static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
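/*
 * Worked example of the helpers above (illustrative, assuming 4 KiB base
 * pages and a 2 MiB huge page, i.e. h->order == 9):
 *   huge_page_size()       == 4096 << 9  == 2 MiB
 *   huge_page_shift()      == 9 + 12     == 21
 *   pages_per_huge_page()  == 1 << 9     == 512
 *   blocks_per_huge_page() == 2 MiB / 512 == 4096 512-byte sectors
 */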
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif
static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}
static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}
pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
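/*
 * Illustrative example: for the head page of the third 2 MiB huge page in a
 * hugetlbfs file (page->index == 2, order 9), basepage_index() yields
 * 2 << 9 == 1024, i.e. the file offset expressed in 4 KiB base pages.
 */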
extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return 0;
#endif
}
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}
static inline bool hugepages_supported(void)
{
        /*
         * Some platforms decide whether they support huge pages at boot
         * time. On those, such as powerpc, HPAGE_SHIFT is set to 0 when
         * there is no such support.
         */
        return HPAGE_SHIFT != 0;
}
#else	/* CONFIG_HUGETLB_PAGE */

#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
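/*
 * Typical caller pattern (illustrative sketch only):
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... inspect or update the huge PTE under the lock ...
 *	spin_unlock(ptl);
 */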
#endif /* _LINUX_HUGETLB_H */