#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that is
 * needed to support multiple hugepage sizes. For example, commit
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced it on powerpc. This allows for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif


#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

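/*
 * Example (illustrative sketch, not taken from the kernel tree):
 * walking every registered hugepage size with the iterator above.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */
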
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

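/*
 * Sketch of the intended subpool lifecycle, assumed from the two
 * declarations above ("h", "max" and "min" are hypothetical locals):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max, min);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */
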
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
				struct vm_area_struct *vma,
				vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
				long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);

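/*
 * Sketch (assumed from the declarations above) of how fault paths
 * serialize on a mapping/index pair; "mapping" and "idx" are
 * hypothetical locals:
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
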
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
				pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf) 0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags) NULL
#define follow_huge_pud(mm, addr, pud, flags) NULL
#define follow_huge_pgd(mm, addr, pgd, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep) ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz) 0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p) do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)	false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

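/*
 * Illustrative sketch (parameter usage assumed from the declaration
 * above): roughly how an anonymous hugetlbfs file is set up to back a
 * MAP_HUGETLB mapping; "len" and "flags" are hypothetical locals.
 *
 *	struct user_struct *user = NULL;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				&user, HUGETLB_ANONHUGE_INODE,
 *				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */
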
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

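/*
 * Example (illustrative): hstate_sizelog(0) selects the default hstate,
 * while hstate_sizelog(21) looks up the 2MB hstate (1UL << 21) and
 * returns NULL if no such size is registered.
 */
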
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

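/*
 * Worked example (illustrative, assuming a 4K base page size): a 2MB
 * huge page has order 9, so huge_page_size() returns 4096 << 9 = 2MB
 * and huge_page_shift() returns 9 + PAGE_SHIFT = 21.
 */
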
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

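/*
 * blocks_per_huge_page() counts 512-byte sectors; for the 2MB example
 * above that is 2MB / 512 = 4096 blocks (illustrative arithmetic only).
 */
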
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

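/*
 * Note (follows from the definitions above): for any registered hstate,
 * hstate_index_to_shift(hstate_index(h)) == huge_page_shift(h).
 */
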
pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT ||
	       huge_page_shift(h) == PGDIR_SHIFT;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e) 0
#define hugepage_migration_supported(h) false

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

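/*
 * Sketch of the expected locking pattern, assumed from the helpers
 * above ("ptep" is a hypothetical pte pointer; error handling elided):
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update *ptep ...
 *	spin_unlock(ptl);
 */
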
#endif /* _LINUX_HUGETLB_H */