arch/powerpc/mm/slice.c
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. The lower slices cover 256MB segments up to the 4GB
 * boundary, which gives us 16 low slices. Above that, slices are tracked
 * in 1TB increments.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	if (!_slice_debug)
		return;
	pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
	pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

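/*
 * Build the slice_mask covering the address range [start, start + len):
 * set one bit per 256MB low slice and one bit per 1TB high slice that
 * the range touches. E.g. a 512MB range starting at 0 sets low_slices
 * bits 0 and 1.
 */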
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

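/*
 * Return non-zero if [addr, addr + len) lies within the task's address
 * space and does not overlap any existing VMA.
 */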
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

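/* Check whether a given low (256MB) or high (1TB) slice contains any VMA. */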
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack: so that each address is controlled by exactly one of the
	 * high or low area bitmaps, the first high area starts at 4GB,
	 * not at 0.
	 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

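/*
 * Build the mask of slices that currently contain no VMAs at all, i.e.
 * the slices that are free to be converted to a new page size.
 */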
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
	unsigned long i;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

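/*
 * Build the mask of slices whose current page size is exactly psize.
 * Slice page sizes are stored as 4-bit fields: low slices packed into
 * context.low_slices_psize, high slices two per byte in
 * context.high_slices_psize.
 */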
static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
	unsigned char *hpsizes;
	int index, mask_index;
	unsigned long i;
	u64 lpsizes;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret->low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret->high_slices);
	}
}

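/*
 * Return true if every slice set in 'mask' is also set in 'available',
 * i.e. the requested range fits entirely within the available slices.
 */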
static int slice_check_fit(struct mm_struct *mm,
			   struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);

	bitmap_and(result, mask.high_slices,
		   available.high_slices, slice_count);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		bitmap_equal(result, mask.high_slices, slice_count);
}

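/*
 * IPI callback: if the target mm is active on this CPU, re-copy the mm's
 * slice information into the paca and flush/rebolt the SLB so the new
 * segment sizes take effect.
 */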
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

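/*
 * Change the page size of every slice set in 'mask' to 'psize' by
 * rewriting the 4-bit psize fields in the mm context, then flush any
 * copro SLBs that may be caching the old segment sizes.
 */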
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}

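/*
 * Bottom-up search: starting at TASK_UNMAPPED_BASE, merge runs of
 * available slices into candidate ranges and hand each range to
 * vm_unmapped_area() until a fit for 'len' is found.
 */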
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	while (addr < mm->context.addr_limit) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= mm->context.addr_limit)
			addr = mm->context.addr_limit;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

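/*
 * Top-down search: starting below mm->mmap_base, merge runs of available
 * slices into candidate ranges and hand each range to vm_unmapped_area()
 * until a fit for 'len' is found. Falls back to the bottom-up search on
 * failure.
 */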
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

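/* Helpers to combine slice masks: dst |= src and dst &= ~src respectively. */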
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices |= src->low_slices;
	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices &= ~src->low_slices;

	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

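/*
 * Main entry point: find an unmapped area of 'len' bytes for page size
 * 'psize', honouring an address hint or MAP_FIXED from 'addr'/'flags',
 * searching top-down or bottom-up as requested, and converting free
 * slices to 'psize' when necessary.
 */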
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/*
	 * init different masks
	 */
	mask.low_slices = 0;
	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

	/* silence stupid warning */;
	potential_mask.low_slices = 0;
	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

	compat_mask.low_slices = 0;
	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	slice_mask_for_size(mm, psize, &good_mask);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		slice_range_to_mask(addr, len, &mask);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mm, mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(addr, len, &mask);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

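/*
 * Return the MMU page size index (MMU_PAGE_*) of the slice containing
 * 'addr', by reading the corresponding 4-bit field from the low or high
 * slice psize arrays.
 */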
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slices, but can get enabled along with MMU_SLICE.
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non-cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slices_psize
 * masks; it will not flush SLBs as that shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());
	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  (unsigned long)mm->context.low_slices_psize,
		  (unsigned long)mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

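/* Force every slice covering [start, start + len) to page size 'psize'. */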
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non-hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	if (radix_enabled())
		return 0;

	slice_range_to_mask(addr, len, &mask);
	slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		  mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mm, mask, available);
}
#endif