/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};
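
/*
 * Illustrative sketch (not an API in this header): given the [from, to)
 * convention above, the huge pages covered by a resv_map's regions can be
 * summed under the resv_map lock roughly like so:
 *
 *      struct file_region *rg;
 *      long nr = 0;
 *
 *      spin_lock(&resv->lock);
 *      list_for_each_entry(rg, &resv->regions, link)
 *              nr += rg->to - rg->from;
 *      spin_unlock(&resv->lock);
 *
 * A region with from == 2 and to == 5 contributes 5 - 2 = 3 huge pages.
 */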

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
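
/*
 * Illustrative sketch: for_each_hstate() walks every registered huge page
 * size. A caller might dump the pool state like this (assuming a context
 * where pr_info() is usable):
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free of %lu\n", h->name,
 *                      h->free_huge_pages, h->nr_huge_pages);
 */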

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **pagep)
{
        BUG();
        return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;     /* inodes allowed */
        long    free_inodes;    /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time. Cleared when page is fully instantiated. Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization: Examined or modified by code that knows it has
 *      the only reference to the page, i.e. after allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables. Indicates the page is a candidate for
 *      migration.
 *      Synchronization: Initially set after new page allocation with no
 *      locking. When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator. Typically used for migration target pages when no pages
 *      are available in the pool. The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has the only reference. All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
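
/*
 * Illustrative sketch: HPAGEFLAG(Freed, freed) above expands to the three
 * accessors HPageFreed(), SetHPageFreed() and ClearHPageFreed(), which
 * operate on bit HPG_freed of the head page's page->private. A caller
 * following the synchronization rules documented above would use them
 * under hugetlb_lock:
 *
 *      spin_lock(&hugetlb_lock);
 *      if (!HPageFreed(page))
 *              SetHPageFreed(page);
 *      spin_unlock(&hugetlb_lock);
 */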

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (struct hugepage_subpool *)(hpage+1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                        struct hugepage_subpool *subpool)
{
        set_page_private(hpage+1, (unsigned long)subpool);
}
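
/*
 * Illustrative note: the subpool pointer lives in the private field of the
 * first tail page (hpage[1]) rather than the head page, because the head
 * page's private field carries the hugetlb state flags defined above.
 * Typical round trip:
 *
 *      hugetlb_set_page_subpool(page, spool);
 *      ...
 *      struct hugepage_subpool *spool = hugetlb_page_subpool(page);
 */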

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
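
/*
 * Worked example (assuming x86-64 with PAGE_SHIFT == 12): the 2 MiB hstate
 * has order 9, so the helpers above yield:
 *
 *      huge_page_size(h)       == 4096 << 9   == 2097152 (2 MiB)
 *      huge_page_shift(h)      == 9 + 12      == 21
 *      pages_per_huge_page(h)  == 1 << 9      == 512
 *      blocks_per_huge_page(h) == 2097152/512 == 4096 (512-byte sectors)
 *
 * The 1 GiB hstate (order 18) is gigantic on x86-64 because its order is
 * >= MAX_ORDER.
 */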

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
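
/*
 * Illustrative example: for a 2 MiB hugetlb page (512 base pages) whose
 * head page has page->index == 3 in huge page units, basepage_index()
 * resolves to 3 * 512 == 1536, the offset of the first base page in
 * PAGE_SIZE units. Non-compound pages simply return page->index.
 */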

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check. It determines
 * whether or not a huge page should be placed in a movable zone. Movability
 * of a huge page matters only if its size is supported for migration: there
 * is no reason for a huge page to be movable if it is not migratable to
 * start with. The huge page should also be small enough that migrating it
 * out of a movable zone remains feasible; mere presence in a movable zone
 * does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}
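
/*
 * Illustrative sketch: a caller that must allocate on a specific node could
 * build its mask like this (assuming a 2 MiB hstate, which is movable on
 * arches that support its migration):
 *
 *      gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *
 * yielding GFP_HIGHUSER_MOVABLE | __GFP_THISNODE. Note only __GFP_THISNODE
 * and __GFP_NOWARN are taken from the caller's mask.
 */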

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
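
/*
 * Illustrative sketch: the usual pairing for the helper above is to take
 * the lock around an examination of the huge PTE and drop it with
 * spin_unlock():
 *
 *      spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *      pte_t entry = huge_ptep_get(ptep);
 *      ... examine or update the entry ...
 *      spin_unlock(ptl);
 */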

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */