/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that is
 * needed to support multiple hugepage sizes. For example, commit
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
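
/*
 * Illustrative sketch (modelled on generic GUP code of this era; an
 * assumption, not part of this header): a page-table walker tests
 * is_hugepd() at each level and hands the entry to gup_huge_pd()
 * instead of descending further, roughly:
 *
 *	if (unlikely(is_hugepd(__hugepd(pgd_val(pgd)))))
 *		if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
 *				 PGDIR_SHIFT, next, write, pages, nr))
 *			return;
 */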

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
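
/*
 * Illustrative usage (an assumption, not taken from this header):
 * iterate over every registered huge page size:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hugetlb: %s\n", h->name);
 */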

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address);
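
/*
 * Illustrative sketch of the fault-mutex pattern (modelled on callers
 * in mm/hugetlb.c; an assumption, not part of this header): faults on
 * the same (mapping, index) are serialized by hashing into the table:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...handle the fault...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */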

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
				   pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
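
/*
 * Illustrative usage (modelled on the mmap path; an assumption, not
 * part of this header): page_size_log typically comes from the
 * MAP_HUGE_SHIFT bits of the mmap flags:
 *
 *	struct hstate *hs;
 *
 *	hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *	if (!hs)
 *		return -EINVAL;
 */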

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
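
/*
 * Worked example (illustrative): for the 2 MiB hstate on x86-64,
 * where PAGE_SHIFT == 12 and h->order == 9:
 *
 *	huge_page_size(h)	== 4096UL << 9 == 2 MiB
 *	huge_page_shift(h)	== 9 + 12 == 21
 *	huge_page_mask(h)	== ~(2 MiB - 1)
 *	pages_per_huge_page(h)	== 1 << 9 == 512
 *	blocks_per_huge_page(h)	== 2 MiB / 512 == 4096 sectors
 */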

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
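
/*
 * Illustrative example (an assumption about the exact arithmetic): with
 * 2 MiB huge pages, a head page whose page->index is 3 in huge-page
 * units resolves to 3 * 512 == 1536 in PAGE_SIZE units; tail pages add
 * their offset within the compound page.
 */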

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check. It
 * determines whether or not a huge page should be placed in a movable
 * zone. Movability need only be considered if the huge page size is
 * supported for migration in the first place: there is no reason for a
 * huge page to be movable if it is not migratable to begin with. Also,
 * the huge page must be large enough to be placed in a movable zone
 * while still being feasible to migrate; mere presence in a movable
 * zone does not by itself make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible
 * to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
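
/*
 * Illustrative sketch of the start/commit pairing (modelled on
 * hugetlb_change_protection(); an assumption, not part of this header):
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */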

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
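
/*
 * Illustrative usage (an assumption, not taken from this header): take
 * the per-PMD or mm-wide lock for a huge PTE, inspect it, then release:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t entry = huge_ptep_get(ptep);
 *	...examine or update the entry under the lock...
 *	spin_unlock(ptl);
 */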

#endif /* _LINUX_HUGETLB_H */