/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from SuperH
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
int prepare_hugepage_range(struct file *file, unsigned long addr,
			   unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct hstate *h = hstate_file(file);
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (TASK_SIZE - len < addr)
		return -EINVAL;

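	/*
	 * A second-level page table maps either huge pages or normal
	 * pages, never a mixture, so also check that no normal mapping
	 * overlaps the huge-PT blocks the range would occupy.
	 */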
	vma = find_vma(mm, ALIGN_HUGEPT(addr));
	if (vma && !(vma->vm_flags & VM_HUGETLB))
		return -EINVAL;

	vma = find_vma(mm, addr);
	if (vma) {
		if (addr + len > vma->vm_start)
			return -EINVAL;
		if (!(vma->vm_flags & VM_HUGETLB) &&
		    (ALIGN_HUGEPT(addr + len) > vma->vm_start))
			return -EINVAL;
	}
	return 0;
}

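/*
 * Walk to (or allocate) the PTE for a huge mapping, and set the page
 * size field in the first-level entry: every page mapped by this
 * second-level table is then treated as huge.
 */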
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_alloc_map(mm, NULL, pmd, addr);
	pgd->pgd &= ~_PAGE_SZ_MASK;
	pgd->pgd |= _PAGE_SZHUGE;

	return pte;
}

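/*
 * Look up the PTE for an existing huge mapping; no allocation is done.
 */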
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	pte = pte_offset_kernel(pmd, addr);

	return pte;
}

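/*
 * Huge PMD sharing is not implemented on Meta: nothing to unshare.
 */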
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

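/*
 * Returning -EINVAL tells the generic code to look the page up through
 * the page tables instead of treating huge addresses specially.
 */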
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

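/*
 * A PMD maps huge pages if its page size field is bigger than the
 * normal page size; PUD-sized huge pages don't exist on Meta.
 */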
int pmd_huge(pmd_t pmd)
{
	return pmd_page_shift(pmd) > PAGE_SHIFT;
}

int pud_huge(pud_t pud)
{
	return 0;
}

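/* PMD-level huge pages are always supported on this architecture. */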
int pmd_huge_support(void)
{
	return 1;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Look for an unmapped area starting after another hugetlb vma.
 * There are guaranteed to be no spare huge PTEs if all the huge pages
 * are full size (4MB), so in that case compile out this search.
 */
#if HPAGE_SHIFT == HUGEPT_SHIFT
static inline unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	return 0;
}
#else
static unsigned long
hugetlb_get_unmapped_area_existing(unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr, addr;
	int after_huge;

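	/*
	 * context.part_huge remembers the end of the last huge mapping
	 * that only partly filled its huge-PT block, so new mappings can
	 * be packed in after it before fresh blocks are claimed.
	 */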
	if (mm->context.part_huge) {
		start_addr = mm->context.part_huge;
		after_huge = 1;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		after_huge = 0;
	}
new_search:
	addr = start_addr;

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		if ((!vma && !after_huge) || TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				goto new_search;
			}
			return 0;
		}
		/* skip ahead if we've aligned right over some vmas */
		if (vma && vma->vm_end <= addr)
			continue;
		/* space before the next vma? */
		if (after_huge && (!vma || ALIGN_HUGEPT(addr + len)
				   <= vma->vm_start)) {
			unsigned long end = addr + len;
			if (end & HUGEPT_MASK)
				mm->context.part_huge = end;
			else if (addr == mm->context.part_huge)
				mm->context.part_huge = 0;
			return addr;
		}
		if (vma && (vma->vm_flags & VM_HUGETLB)) {
			/* space after a huge vma in 2nd level page table? */
			if (vma->vm_end & HUGEPT_MASK) {
				after_huge = 1;
				/* no need to align to the next PT block */
				addr = vma->vm_end;
				continue;
			}
		}
		after_huge = 0;
		addr = ALIGN_HUGEPT(vma->vm_end);
	}
}
#endif

/* Do a full search to find an area without any nearby normal pages. */
static unsigned long
hugetlb_get_unmapped_area_new_pmd(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
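	/* align the result to a huge-PT block so no normal pages share it */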
	info.align_mask = PAGE_MASK & HUGEPT_MASK;
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		if (!prepare_hugepage_range(file, addr, len))
			return addr;
	}

	/*
	 * Look for an existing hugetlb vma with space after it (this is
	 * to minimise fragmentation caused by huge pages).
	 */
	addr = hugetlb_get_unmapped_area_existing(len);
	if (addr)
		return addr;

	/*
	 * Find an unmapped naturally aligned set of 4MB blocks that we can use
	 * for huge pages.
	 */
	return hugetlb_get_unmapped_area_new_pmd(len);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/* necessary for boot time 4MB huge page allocation */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == (1 << HPAGE_SHIFT)) {
		hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
		       ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
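/*
 * For example, with an HPAGE_SHIFT of 22 (the 4MB case noted above)
 * this accepts "hugepagesz=4M" on the kernel command line, typically
 * alongside "hugepages=<n>" to reserve pages at boot.
 */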