#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct ctl_table;

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_HUGETLB;
}

int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
int hugetlb_report_meminfo(char *);
int hugetlb_report_node_meminfo(int, char *);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access);
int hugetlb_reserve_pages(struct inode *inode, long from, long to);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);

extern unsigned long max_huge_pages;
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
int pmd_huge(pmd_t pmd);
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(mm, addr, len)	0
#endif

#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
#define hugetlb_free_pgd_range	free_pgd_range
#else
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			unsigned long end, unsigned long floor,
			unsigned long ceiling);
#endif

#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
						pgoff_t pgoff)
{
	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
#else
int prepare_hugepage_range(unsigned long addr, unsigned long len,
						pgoff_t pgoff);
#endif
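/*
 * As a worked example (assuming 4 KB base pages, i.e. PAGE_SHIFT == 12,
 * and 2 MB huge pages): ~HPAGE_MASK is 0x1fffff, so ~HPAGE_MASK >>
 * PAGE_SHIFT is 0x1ff, and the default prepare_hugepage_range() above
 * accepts only a pgoff that is a multiple of 512 base pages and an
 * addr/len that are multiples of 2 MB.  The actual sizes are
 * per-architecture; these figures are only illustrative.
 */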

#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
#define set_huge_pte_at(mm, addr, ptep, pte)	set_pte_at(mm, addr, ptep, pte)
#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
#else
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);
#endif

#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
#define hugetlb_prefault_arch_hook(mm)		do { } while (0)
#else
void hugetlb_prefault_arch_hook(struct mm_struct *mm);
#endif

#else /* !CONFIG_HUGETLB_PAGE */

static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
#define unmap_hugepage_range(vma, start, end)	BUG()
#define hugetlb_report_meminfo(buf)		0
#define hugetlb_report_node_meminfo(n, buf)	0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define prepare_hugepage_range(addr,len,pgoff)	(-EINVAL)
#define pmd_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })

#define hugetlb_change_protection(vma, address, end, newprot)

#ifndef HPAGE_MASK
#define HPAGE_MASK	PAGE_MASK	/* Keep the compiler happy */
#define HPAGE_SIZE	PAGE_SIZE
#endif

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_config {
	uid_t	uid;
	gid_t	gid;
	umode_t	mode;
	long	nr_blocks;
	long	nr_inodes;
};

struct hugetlbfs_sb_info {
	long	max_blocks;	/* blocks allowed */
	long	free_blocks;	/* blocks free */
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
};


struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t);
int hugetlb_get_quota(struct address_space *mapping);
void hugetlb_put_quota(struct address_space *mapping);

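/*
 * A file is treated as hugepage-backed in two cases: it was opened on a
 * hugetlbfs mount directly (its f_op is hugetlbfs_file_operations), or it
 * is the SysV shared memory file for a segment created with SHM_HUGETLB,
 * which is the case is_file_shm_hugepages() detects.
 */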
static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

static inline void set_file_hugepages(struct file *file)
{
	file->f_op = &hugetlbfs_file_operations;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		0
#define set_file_hugepages(file)	BUG()
#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#endif /* _LINUX_HUGETLB_H */
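
/*
 * Usage sketch (illustrative, not part of this header): the SysV shm code
 * is the main in-kernel caller of hugetlb_file_setup(), roughly:
 *
 *	struct file *file;
 *
 *	if (shmflg & SHM_HUGETLB)
 *		file = hugetlb_file_setup(name, size);
 *	...
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * is_file_hugepages() then lets generic code recognise such a file later
 * and take the hugetlb paths declared above instead of the normal
 * page-sized ones.
 */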