/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4GB boundary. That gets us 16 low slices. For the rest we
 * track slices in 1TB steps.
 */
struct slice_mask {
        u64 low_slices;
        DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
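
/*
 * Illustrative helper, added for exposition only (it is not part of the
 * original file): test whether the slice containing @addr is set in
 * @mask, mirroring the low/high split used throughout this file. It
 * assumes the GET_{LOW,HIGH}_SLICE_INDEX() helpers that the functions
 * below also rely on.
 */
static inline bool slice_mask_test_addr(const struct slice_mask *mask,
                                        unsigned long addr)
{
        if (addr < SLICE_LOW_TOP)
                /* low slices: one bit per 256MB segment in a u64 */
                return !!(mask->low_slices & (1u << GET_LOW_SLICE_INDEX(addr)));
        /* high slices: one bit per 1TB slice in the bitmap */
        return !!test_bit(GET_HIGH_SLICE_INDEX(addr), mask->high_slices);
}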

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
        pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if ((start + len) > SLICE_LOW_TOP) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}
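
/*
 * Worked example (exposition only, assuming the 256MB low-slice geometry
 * described above): slice_range_to_mask(0x10000000, 0x30000000, &m)
 * covers the 256MB-1GB range and yields m.low_slices == 0xe (low slices
 * 1, 2 and 3) with an empty high_slices bitmap, since the range ends
 * below SLICE_LOW_TOP.
 */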

static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->task_size - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack, so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
        unsigned long i;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (mm->task_size <= SLICE_LOW_TOP)
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}

static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
        unsigned char *hpsizes;
        int index, mask_index;
        unsigned long i;
        u64 lpsizes;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret->low_slices |= 1u << i;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
                        __set_bit(i, ret->high_slices);
        }
}
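
/*
 * Packing note (exposition only): each slice records its page size in a
 * 4-bit field. For low slices, field i sits at bits [4*i+3:4*i] of the
 * lpsizes u64, so lpsizes == 0x45 would mean low slice 0 uses psize 5
 * and low slice 1 uses psize 4. High slices pack two such fields per
 * byte, which is what the mask_index/index arithmetic above unpacks.
 */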

static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
        unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);

        bitmap_and(result, mask.high_slices,
                   available.high_slices, slice_count);

        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                bitmap_equal(result, mask.high_slices, slice_count);
}
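
/*
 * In other words, slice_check_fit() is a subset test: every slice set in
 * @mask must also be set in @available. For example (exposition only),
 * mask.low_slices == 0x6 fits available.low_slices == 0x7, but not
 * available.low_slices == 0x5, where low slice 1 is missing.
 */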

static void slice_flush_segments(void *parm)
{
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
}

static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes;
        u64 lpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (test_bit(i, mask.high_slices))
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}
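
/*
 * Conversion example (exposition only): demoting low slice 2 to 4K pages
 * clears nibble 2 of lpsizes and ORs in the new size, i.e.
 *
 *      lpsizes = (lpsizes & ~0xf00ul) | ((unsigned long)MMU_PAGE_4K << 8);
 *
 * The spinlock above makes this read-modify-write of the packed fields
 * atomic with respect to concurrent conversions.
 */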

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 struct slice_mask available,
                                 int end,
                                 unsigned long *boundary_addr)
{
        unsigned long slice;

        if (addr < SLICE_LOW_TOP) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available.low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available.high_slices);
        }
}
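
/*
 * Example (exposition only): scanning forward (end == 1) from an address
 * in low slice 15 sets *boundary_addr to 16 << SLICE_LOW_SHIFT, i.e.
 * SLICE_LOW_TOP. Scanning backward (end == 0) from an address in high
 * slice 0 would compute a boundary of 0, so the (slice + end) ? : test
 * returns SLICE_LOW_TOP instead, matching the convention that the first
 * high slice starts at 4GB rather than 0.
 */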

static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = TASK_UNMAPPED_BASE;
        /*
         * Search up to the allowed max value for this mmap request
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, prev;
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = mm->mmap_base;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Only do this for requests
         * whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;

        while (addr > PAGE_SIZE) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < PAGE_SIZE)
                        addr = PAGE_SIZE;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}
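
/*
 * Window arithmetic sketch (the 128TB figure is an assumption about
 * DEFAULT_MAP_WINDOW, not stated in this file): with a 128TB default
 * window and addr_limit expanded to 512TB, a request whose high_limit
 * exceeds the default window starts its top-down search at
 * mmap_base + (512TB - 128TB), making the space above the default
 * window reachable without disturbing ordinary allocations below it.
 */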

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);

        dst->low_slices |= src->low_slices;
        bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
        bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);

        dst->low_slices &= ~src->low_slices;

        bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
        bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}
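
/*
 * These helpers implement mask algebra over both halves at once. The
 * allocator below composes them: slice_or_mask(&potential_mask,
 * &good_mask) builds the candidate set, and slice_andnot_mask(&mask,
 * &good_mask) later strips slices that need no conversion.
 */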

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE   MMU_PAGE_64K
#else
#define MMU_PAGE_BASE   MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask mask;
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        struct slice_mask compat_mask;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        /*
         * Check if we need to expand the slice area.
         */
        if (unlikely(addr > mm->context.addr_limit &&
                     mm->context.addr_limit != TASK_SIZE)) {
                mm->context.addr_limit = TASK_SIZE;
                on_each_cpu(slice_flush_segments, mm, 1);
        }
        /*
         * This mmap request can allocate up to 512TB
         */
        if (addr > DEFAULT_MAP_WINDOW)
                high_limit = mm->context.addr_limit;
        else
                high_limit = DEFAULT_MAP_WINDOW;
        /*
         * init different masks
         */
        mask.low_slices = 0;
        bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

        /* silence stupid warning */;
        potential_mask.low_slices = 0;
        bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

        compat_mask.low_slices = 0;
        bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        if (len > mm->task_size)
                return -ENOMEM;
        if (len & ((1ul << pshift) - 1))
                return -EINVAL;
        if (fixed && (addr & ((1ul << pshift) - 1)))
                return -EINVAL;
        if (fixed && addr > (mm->task_size - len))
                return -ENOMEM;

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, 1ul << pshift);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > mm->task_size - len ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        slice_mask_for_size(mm, psize, &good_mask);
        slice_print_mask(" good_mask", good_mask);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *      check if fits in good | compat => OK
         *      check if fits in good | compat | free => convert free
         *      else bad
         * If have hint:
         *      check if hint fits in good => OK
         *      check if hint fits in good | free => convert free
         * Otherwise:
         *      search in good, found => OK
         *      search in good | free, found => convert free
         *      search in good | compat | free, found => convert free.
         */

#ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
                if (fixed)
                        slice_or_mask(&good_mask, &compat_mask);
        }
#endif

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Build a mask for the requested range */
                slice_range_to_mask(addr, len, &mask);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mm, mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask, we don't have to setup,
                         * we thus return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        return newaddr;
                }
        }

        /* We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask);
        slice_or_mask(&potential_mask, &good_mask);
        slice_print_mask(" potential", potential_mask);

        if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
                slice_dbg(" fits potential !\n");
                goto convert;
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                addr = slice_find_area(mm, len, good_mask,
                                       psize, topdown, high_limit);
                if (addr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", addr);
                        return addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        addr = slice_find_area(mm, len, potential_mask,
                               psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
        if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &compat_mask);
                addr = slice_find_area(mm, len, potential_mask,
                                       psize, topdown, high_limit);
        }
#endif

        if (addr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(addr, len, &mask);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

 convert:
        slice_andnot_mask(&mask, &good_mask);
        slice_andnot_mask(&mask, &compat_mask);
        if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
                slice_convert(mm, mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize, 1);
}

unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *hpsizes;
        int index, mask_index;

        /*
         * Radix doesn't use slice, but can get enabled along with MMU_SLICE
         */
        if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
                return MMU_PAGE_64K;
#else
                return MMU_PAGE_4K;
#endif
        }
        if (addr < SLICE_LOW_TOP) {
                u64 lpsizes;
                lpsizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xf;
        }
        hpsizes = mm->context.high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
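
/*
 * Usage sketch (illustrative only): fault-handling callers look up the
 * page size backing an effective address before picking a hash/SLB
 * encoding, along the lines of:
 *
 *      unsigned int psize = get_slice_psize(mm, ea);
 *      unsigned int shift = mmu_psize_defs[psize].shift;
 */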

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        int index, mask_index;
        unsigned char *hpsizes;
        unsigned long flags, lpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        VM_BUG_ON(radix_enabled());
        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));
        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

 bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}

void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, mask, psize);
}
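
/*
 * Usage sketch (illustrative only; the 16M page size is just an example
 * value): a hugetlb-style caller can mark a 1TB-aligned range as backed
 * by a given page size in one call:
 *
 *      slice_set_range_psize(mm, addr, 1UL << SLICE_HIGH_SHIFT, MMU_PAGE_16M);
 *
 * which converts every slice overlapping [addr, addr + 1TB).
 */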

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;

        if (radix_enabled())
                return 0;

        slice_range_to_mask(addr, len, &mask);
        slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
                slice_or_mask(&available, &compat_mask);
        }
#endif

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mm, mask, available);
}
#endif