/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual page frame number */
	struct page *page;
	pte_t *pte;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Index into the compound page to get the subpage for @address. */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}
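/*
 * Note on the indexing above: vpfn % (HPAGE_SIZE/PAGE_SIZE) selects the
 * subpage within the compound huge page. With 2 MB huge pages on 4 KB
 * base pages, HPAGE_SIZE/PAGE_SIZE == 512, so an address 12 KB into the
 * huge page lands on subpage index 3 of pte_page(*pte).
 */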
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else
/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
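/*
 * The check above separates three cases for a populated pmd:
 *  - present leaf entry (_PAGE_PRESENT and _PAGE_PSE set): the masked
 *    bits differ from _PAGE_PRESENT, so the result is 1;
 *  - non-present hugetlb entry (migration or hwpoison swap entry, with
 *    _PAGE_PRESENT clear): also differs from _PAGE_PRESENT, result 1;
 *  - present pointer to a PTE page (exactly _PAGE_PRESENT, no
 *    _PAGE_PSE): matches _PAGE_PRESENT, result 0.
 * pud_huge() below only tests _PAGE_PSE; at that level a set PSE bit is
 * taken as sufficient to identify a (1 GB) leaf mapping.
 */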
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_compat_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
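/*
 * align_mask above encodes the alignment vm_unmapped_area() must honor:
 * PAGE_MASK & ~huge_page_mask(h) keeps exactly the bits between the base
 * page size and the huge page size. For 2 MB pages on 4 KB base pages
 * that is 0x1ff000, which forces the returned address to be 2 MB aligned.
 */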
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
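/*
 * Note: vm_unmapped_area() returns either a page-aligned address or a
 * negative errno, so "addr & ~PAGE_MASK" above is a compact test for
 * failure; the only error this path can see is -ENOMEM, which the
 * VM_BUG_ON asserts before retrying bottom-up.
 */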
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */
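/*
 * For reference, a minimal userspace sketch that exercises the paths
 * above (not part of this file; assumes a 2 MB hstate is configured and
 * huge pages are reserved):
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2 * 1024 * 1024;
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *		if (p == MAP_FAILED) {
 *			perror("mmap");	// e.g. no huge pages reserved
 *			return 1;
 *		}
 *		// The kernel picked p via hugetlb_get_unmapped_area().
 *		munmap(p, len);
 *		return 0;
 *	}
 */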
#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
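/*
 * Example boot parameters (see Documentation/admin-guide/kernel-parameters.txt),
 * e.g. to reserve sixteen 1 GB pages at boot on a CPU with the pdpe1gb
 * (X86_FEATURE_GBPAGES) feature:
 *
 *	hugepagesz=1G hugepages=16
 */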
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif /* CONFIG_X86_64 */
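/*
 * With the 1 GB hstate registered above, gigantic pages can also be
 * allocated at runtime through sysfs, for example:
 *
 *	echo 4 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 */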