#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

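/*
 * Usage sketch (illustrative only, not part of this header): a resv_map
 * is reference counted through 'refs', so the final user drops it with
 * kref_put() using resv_map_release() as the destructor.
 */
#if 0
static void resv_map_lifetime_example(void)
{
	struct resv_map *resv = resv_map_alloc();

	if (!resv)
		return;		/* allocation failed */
	/* ... record reservations on resv->regions under resv->lock ... */
	kref_put(&resv->refs, resv_map_release);
}
#endif
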
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

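/*
 * Usage sketch (illustrative only): for_each_hstate() visits every
 * registered huge page size, e.g. to report each pool. huge_page_size()
 * and the 'name' field are defined further down in this header.
 */
#if 0
static void hstate_walk_example(void)
{
	struct hstate *h;

	for_each_hstate(h)
		pr_info("%s: %lu bytes per huge page\n",
			h->name, huge_page_size(h));
}
#endif
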
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

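/*
 * Usage sketch (illustrative only): hugetlbfs creates one subpool per
 * mount and drops it at unmount; the limits below are made-up values.
 */
#if 0
static void subpool_lifetime_example(struct hstate *h)
{
	/* cap the pool at 1024 huge pages, guarantee at least 16 */
	struct hugepage_subpool *spool = hugepage_new_subpool(h, 1024, 16);

	if (!spool)
		return;		/* allocation failed */
	/* ... charge huge page allocations against spool ... */
	hugepage_put_subpool(spool);
}
#endif
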
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);

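/*
 * Usage sketch (illustrative only): concurrent faults on the same file
 * slot are serialized by hashing into hugetlb_fault_mutex_table,
 * mirroring what hugetlb_fault() does internally.
 */
#if 0
static void fault_mutex_example(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address)
{
	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... handle the fault for this (mapping, idx) slot ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
#endif
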
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, commit a4fe3ce76 ("powerpc/mm:
 * Allow more flexible layouts for hugepage pagetables") introduced
 * this on powerpc, allowing a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

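/*
 * Worked example (illustrative only): hugetlb_file_setup() passes the
 * log2 page size extracted from the mmap()/shmget() flags, so a caller
 * requesting 2 MB pages arrives here with page_size_log == 21 and gets
 * the matching hstate back, or NULL if no such size is registered.
 */
#if 0
static struct hstate *sizelog_example(void)
{
	return hstate_sizelog(21);	/* 1UL << 21 == 2 MB */
}
#endif
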
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

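/*
 * Worked example (illustrative only): with 4 KB base pages, a 2 MB
 * hstate has order 9, so huge_page_size() == 4096 << 9 == 2 MB,
 * pages_per_huge_page() == 1 << 9 == 512 base pages, and
 * blocks_per_huge_page() == 2097152 / 512 == 4096 sectors of 512 bytes.
 */
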
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

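/*
 * Usage sketch (illustrative only): hstate_index() is the inverse of
 * indexing the hstates[] array, which is what hstate_index_to_shift()
 * above relies on.
 */
#if 0
static bool hstate_index_example(struct hstate *h)
{
	return &hstates[hstate_index(h)] == h &&
	       hstate_index_to_shift(hstate_index(h)) == huge_page_shift(h);
}
#endif
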
pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
#define hugepage_migration_supported(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

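/*
 * Usage sketch (illustrative only): huge_pte_lock() returns the lock it
 * took, so callers unlock the same spinlock_t instead of recomputing it
 * via huge_pte_lockptr().
 */
#if 0
static void huge_pte_lock_example(struct hstate *h, struct mm_struct *mm,
				  pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

	/* ... read or update *ptep while the lock is held ... */
	spin_unlock(ptl);
}
#endif
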
#endif /* _LINUX_HUGETLB_H */