#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

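/*
 * A subpool is created for a hugetlbfs mount that was given a size limit
 * (see hugetlbfs_sb_info below): max_hpages caps the huge pages the mount
 * may consume and used_hpages tracks current usage, both under @lock;
 * @count is the subpool's reference count.
 */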
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

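/*
 * A reservation map records, as a list of file regions under @lock, which
 * ranges of a mapping already have huge pages reserved; @refs keeps the
 * map alive while it is shared.
 */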
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

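/*
 * hugetlb_lock protects the global huge page free lists and counters.
 * hstates[] holds one entry per supported huge page size; the first
 * hugetlb_max_hstate entries are valid.
 */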
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			struct vm_area_struct *vma,
			vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

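/*
 * Stubs for !CONFIG_HUGETLB_PAGE: they keep callers compiling, and the
 * paths that can never legitimately be reached without hugetlb support
 * simply BUG().
 */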
static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

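/*
 * alloc_huge_page_node() allocates a huge page from the given NUMA node;
 * alloc_huge_page_noerr() is a variant used on page migration paths that
 * returns NULL rather than an ERR_PTR() on failure.
 */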
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

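/*
 * hstate lookup helpers: a mapping's huge page size is recorded in the
 * hugetlbfs superblock, so the chain runs vma -> file -> inode ->
 * superblock -> hstate.
 */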
static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

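/*
 * @page_size_log is the log2 huge page size callers encode in the
 * MAP_HUGE_*/SHM_HUGE_* flag bits; 0 selects the default huge page size.
 */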
static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;
	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

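/*
 * Geometry helpers.  For example, an x86 2 MiB huge page has order 9:
 * huge_page_size() == 4096 << 9 == 2 MiB, huge_page_shift() == 12 + 9 == 21
 * and pages_per_huge_page() == 512.
 */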
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

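/* Size of a huge page in 512-byte blocks, as used for i_blocks accounting. */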
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Currently hugepage migration is enabled only for PMD-based huge pages.
 * This function will be updated as hugepage migration becomes more widely
 * supported.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}

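/*
 * Pick the page-table lock that guards @pte: PMD-sized pages can use the
 * split PMD lock, anything larger falls back to mm->page_table_lock.
 * PAGE_SIZE "huge" pages would need pte_lockptr() instead, hence the
 * VM_BUG_ON() below.
 */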
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

static inline bool hugepages_supported(void)
{
	/*
	 * Some platforms decide at boot time whether they support huge
	 * pages.  On those (such as powerpc), HPAGE_SHIFT is set to 0
	 * when there is no such support.
	 */
	return HPAGE_SHIFT != 0;
}

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
#define pmd_huge_support() 0
#define hugepage_migration_support(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif /* CONFIG_HUGETLB_PAGE */

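/*
 * Take and return the page-table lock guarding @pte.  A sketch of the
 * usual caller pattern, assuming @ptep was already looked up with
 * huge_pte_offset():
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the entry ...
 *	spin_unlock(ptl);
 */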
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#endif /* _LINUX_HUGETLB_H */