/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);
/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4GB mark. That gets us 16 low slices. Above that we track
 * slices in 1TB units.
 */
struct slice_mask {
        u64 low_slices;
        DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label, (int)SLICE_NUM_LOW, &mask.low_slices);
        pr_devel("%s high_slice: %*pbl\n", label, (int)SLICE_NUM_HIGH, mask.high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

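/*
 * Build the slice mask covering the address range [start, start + len):
 * one bit per 256MB low slice below 4GB, one bit per 1TB high slice
 * above.
 */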
static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (start < SLICE_LOW_TOP) {
                unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if ((start + len) > SLICE_LOW_TOP) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}

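/*
 * Return 1 if [addr, addr + len) overlaps no VMA and stays below the
 * address space limit, 0 otherwise.
 */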
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm->context.slb_addr_limit - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

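/* Return 1 if any part of the given 256MB low slice is covered by a VMA. */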
static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

        /* Hack: so that each address is controlled by exactly one
         * of the high or low area bitmaps, the first high area starts
         * at 4GB, not 0 */
        if (start == 0)
                start = SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

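/*
 * Compute the mask of slices (low and high) that currently contain no
 * VMAs and could therefore be converted to a different page size.
 */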
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
        unsigned long i;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}

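/*
 * Compute the mask of slices whose current page size is @psize. Low
 * slice sizes are packed four bits each in low_slices_psize; high
 * slice sizes are packed two per byte in high_slices_psize.
 */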
static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
        unsigned char *hpsizes;
        int index, mask_index;
        unsigned long i;
        u64 lpsizes;

        ret->low_slices = 0;
        bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == psize)
                        ret->low_slices |= 1u << i;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
                        __set_bit(i, ret->high_slices);
        }
}

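/* Return 1 if every slice set in @mask is also set in @available. */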
static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
        unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);

        bitmap_and(result, mask.high_slices,
                   available.high_slices, slice_count);

        return (mask.low_slices & available.low_slices) == mask.low_slices &&
                bitmap_equal(result, mask.high_slices, slice_count);
}

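/*
 * Called on each CPU via on_each_cpu(): resync the paca copy of the
 * slice masks and flush the SLB so stale segment mappings are dropped.
 */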
static void slice_flush_segments(void *parm)
{
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_rebolt();
        local_irq_restore(flags);
}

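/*
 * Change the page size of every slice set in @mask to @psize, then
 * notify any coprocessors so they can invalidate their SLB entries.
 */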
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes;
        u64 lpsizes;
        unsigned long i, flags;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (mask.low_slices & (1u << i))
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));

        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (test_bit(i, mask.high_slices))
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 struct slice_mask available,
                                 int end,
                                 unsigned long *boundary_addr)
{
        unsigned long slice;
        if (addr < SLICE_LOW_TOP) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available.low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available.high_slices);
        }
}

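/*
 * Bottom-up search: walk upward from TASK_UNMAPPED_BASE, merging runs
 * of contiguous available slices, and ask vm_unmapped_area() for a fit
 * within each run.
 */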
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long len,
                                              struct slice_mask available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = TASK_UNMAPPED_BASE;
        /*
         * Scan up to the maximum address allowed for this mmap request.
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

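/*
 * Top-down search: walk downward from mmap_base, merging runs of
 * contiguous available slices; fall back to the bottom-up search if
 * nothing is found.
 */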
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long len,
                                             struct slice_mask available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long addr, found, prev;
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;

        addr = mm->mmap_base;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Only apply this for
         * requests whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;

        while (addr > PAGE_SIZE) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < PAGE_SIZE)
                        addr = PAGE_SIZE;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, len, available, psize, high_limit);
}

static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     struct slice_mask mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
}

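/* dst |= src, for both the low and high slice masks. */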
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);

        dst->low_slices |= src->low_slices;
        bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
        bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

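/* dst &= ~src, for both the low and high slice masks. */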
static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);

        dst->low_slices &= ~src->low_slices;

        bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
        bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

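/*
 * The kernel's base page size: converting slices to a size larger than
 * this requires an SLB flush (see slice_get_unmapped_area() below).
 */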
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

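/*
 * Core allocator: find a free range of @len bytes mappable with @psize
 * pages, honouring an address hint or MAP_FIXED, and convert any free
 * slices the chosen range needs. The search policy is spelled out in
 * the comment block inside.
 */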
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask mask;
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        struct slice_mask compat_mask;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long page_size = 1UL << pshift;
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        high_limit = DEFAULT_MAP_WINDOW;
        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
                high_limit = TASK_SIZE;

        if (len > high_limit)
                return -ENOMEM;
        if (len & (page_size - 1))
                return -EINVAL;
        if (fixed) {
                if (addr & (page_size - 1))
                        return -EINVAL;
                if (addr > high_limit - len)
                        return -ENOMEM;
        }

        if (high_limit > mm->context.slb_addr_limit) {
                mm->context.slb_addr_limit = high_limit;
                on_each_cpu(slice_flush_segments, mm, 1);
        }

        /*
         * init different masks
         */
        mask.low_slices = 0;
        bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

        /* silence stupid warning */
        potential_mask.low_slices = 0;
        bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

        compat_mask.low_slices = 0;
        bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        BUG_ON(mm->context.slb_addr_limit == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = _ALIGN_UP(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        slice_mask_for_size(mm, psize, &good_mask);
        slice_print_mask(" good_mask", good_mask);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *	check if fits in good | compat => OK
         *	check if fits in good | compat | free => convert free
         *	else bad
         * If have hint:
         *	check if hint fits in good => OK
         *	check if hint fits in good | free => convert free
         * Otherwise:
         *	search in good, found => OK
         *	search in good | free, found => convert free
         *	search in good | compat | free, found => convert free.
         */

#ifdef CONFIG_PPC_64K_PAGES
        /* If we support combo pages, we can allow 64k pages in 4k slices */
        if (psize == MMU_PAGE_64K) {
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
                if (fixed)
                        slice_or_mask(&good_mask, &compat_mask);
        }
#endif

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Build a mask for the requested range */
                slice_range_to_mask(addr, len, &mask);
                slice_print_mask(" mask", mask);

                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_fit(mm, mask, good_mask)) {
                        slice_dbg(" fits good !\n");
                        return addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask, we don't have to setup,
                         * we thus return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        return newaddr;
                }
        }

        /* We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask);
        slice_or_mask(&potential_mask, &good_mask);
        slice_print_mask(" potential", potential_mask);

        if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
                slice_dbg(" fits potential !\n");
                goto convert;
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                addr = slice_find_area(mm, len, good_mask,
                                       psize, topdown, high_limit);
                if (addr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", addr);
                        return addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        addr = slice_find_area(mm, len, potential_mask,
                               psize, topdown, high_limit);

#ifdef CONFIG_PPC_64K_PAGES
        if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &compat_mask);
                addr = slice_find_area(mm, len, potential_mask,
                                       psize, topdown, high_limit);
        }
#endif

        if (addr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(addr, len, &mask);
        slice_dbg(" found potential area at 0x%lx\n", addr);
        slice_print_mask(" mask", mask);

convert:
        slice_andnot_mask(&mask, &good_mask);
        slice_andnot_mask(&mask, &compat_mask);
        if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
                slice_convert(mm, mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        return slice_get_unmapped_area(addr, len, flags,
                                       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        return slice_get_unmapped_area(addr0, len, flags,
                                       current->mm->context.user_psize, 1);
}

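/* Return the MMU page size index (MMU_PAGE_*) in force at @addr. */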
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *hpsizes;
        int index, mask_index;

        /*
         * Radix doesn't use slices, but it can be enabled along with
         * MMU_SLICE.
         */
        if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
                return MMU_PAGE_64K;
#else
                return MMU_PAGE_4K;
#endif
        }
        if (addr < SLICE_LOW_TOP) {
                u64 lpsizes;
                lpsizes = mm->context.low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xf;
        }
        hpsizes = mm->context.high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
        int index, mask_index;
        unsigned char *hpsizes;
        unsigned long flags, lpsizes;
        unsigned int old_psize;
        int i;

        slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

        VM_BUG_ON(radix_enabled());
        spin_lock_irqsave(&slice_convert_lock, flags);

        old_psize = mm->context.user_psize;
        slice_dbg(" old_psize=%d\n", old_psize);
        if (old_psize == psize)
                goto bail;

        mm->context.user_psize = psize;
        wmb();

        lpsizes = mm->context.low_slices_psize;
        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
                        lpsizes = (lpsizes & ~(0xful << (i * 4))) |
                                (((unsigned long)psize) << (i * 4));
        /* Assign the value back */
        mm->context.low_slices_psize = lpsizes;

        hpsizes = mm->context.high_slices_psize;
        for (i = 0; i < SLICE_NUM_HIGH; i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
                        hpsizes[index] = (hpsizes[index] &
                                          ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm->context.low_slices_psize,
                  (unsigned long)mm->context.high_slices_psize);

bail:
        spin_unlock_irqrestore(&slice_convert_lock, flags);
}

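/* Set the page size of every slice covering [start, start + len). */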
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        struct slice_mask mask, available;
        unsigned int psize = mm->context.user_psize;

        if (radix_enabled())
                return 0;

        slice_range_to_mask(addr, len, &mask);
        slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
        /* We need to account for 4k slices too */
        if (psize == MMU_PAGE_64K) {
                struct slice_mask compat_mask;
                slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
                slice_or_mask(&available, &compat_mask);
        }
#endif

#if 0 /* too verbose */
        slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
                  mm, addr, len);
        slice_print_mask(" mask", mask);
        slice_print_mask(" available", available);
#endif
        return !slice_check_fit(mm, mask, available);
}
#endif