git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - arch/powerpc/mm/book3s64/slb.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 */
#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>
enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}
static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
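
/*
 * Illustrative example of the encodings above (constants per
 * book3s/64/mmu-hash.h): for the kernel linear mapping at
 * ea = 0xc000000000000000 with 256M segments and index = LINEAR_INDEX,
 * mk_esid_data() yields (ea & ESID_MASK) | SLB_ESID_V | 0, i.e.
 * 0xc000000008000000: the ESID in the upper bits, the valid bit set,
 * and the SLB slot number in the low bits, which is the RB operand
 * layout that slbmte expects.
 */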
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/*
	 * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
	 * ignores all other bits from 0-27, so just clear them all.
	 */
	ea &= ~((1UL << 28) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}
static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory");
}
/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}
/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
/*
 * This flushes non-bolted entries, it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
	struct slb_shadow *p = get_slb_shadow();

	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	asm volatile("isync\n"
		     "slbia\n"
		     "slbmte  %0, %1\n"
		     "isync"
		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
		     : "memory");
	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}
void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}
void slb_vmalloc_update(void)
{
	/*
	 * vmalloc is not bolted, so just have to flush non-bolted.
	 */
	slb_flush_and_restore_bolted();
}
static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}
static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		/* EAs are stored >> 28 so 256MB segments don't need clearing */
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}
static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
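
/*
 * Note on the helpers above: slb_preload_esid[] behaves as a small ring
 * buffer of ESIDs (addresses shifted right by SID_SHIFT). preload_add()
 * appends at (tail + nr) and, once the buffer is full, advances the tail
 * so the oldest entry is overwritten; preload_age() simply drops the
 * oldest entry, letting rarely used segments fall out of the preload set
 * over time.
 */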
void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

	/*
	 * preload cache can only be used to determine whether a SLB
	 * entry exists if it does not start to overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/*
	 * We have no good place to clear the slb preload cache on exec,
	 * flush_thread is about the earliest arch hook but that happens
	 * after we switch to the mm and have already preloaded the SLBEs.
	 *
	 * For the most part it's probably okay to use entries from the
	 * previous exec, they will age out if unused. It may turn out to
	 * be an advantage to clear the cache before switching to it,
	 * however.
	 */

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* Libraries and mmaps. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}
void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	/* see above */
	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/* Userspace entry address. */
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

	/* Top of stack, grows down. */
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

	/* Bottom of heap, grows up. */
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	asm volatile("isync" : : : "memory");
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile(PPC_SLBIA(3));
	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			unsigned long slbie_data = 0;

			for (i = 0; i < offset; i++) {
				unsigned long ea;

				ea = (unsigned long)
					get_paca()->slb_cache[i] << SID_SHIFT;
				/*
				 * Could assert_slb_presence(true) here, but
				 * hypervisor or machine check could have come
				 * in and removed the entry at this point.
				 */

				slbie_data = ea;
				slbie_data |= user_segment_size(slbie_data)
						<< SLBIE_SSIZE_SHIFT;
				slbie_data |= SLBIE_C; /* user slbs have C=1 */
				asm volatile("slbie %0" : : "r" (slbie_data));
			}

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				asm volatile("slbie %0" : : "r" (slbie_data));

		} else {
			struct slb_shadow *p = get_slb_shadow();
			unsigned long ksp_esid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
			unsigned long ksp_vsid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

			asm volatile(PPC_SLBIA(1) "\n"
				     "slbmte	%0,%1\n"
				     "isync"
				     :: "r"(ksp_vsid_data),
					"r"(ksp_esid_data));
		}

		get_paca()->slb_cache_ptr = 0;
	}
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	copy_mm_to_paca(mm);

	/*
	 * We gradually age out SLBs after a number of context switches to
	 * reduce reload overhead of unused entries (like we do with FP/VEC
	 * reload). Each time we wrap 256 switches, take an entry out of the
	 * SLB preload cache.
	 */
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */
	asm volatile("isync" : : : "memory");
}
void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}
static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
		local_paca->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}
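
/*
 * Note: what gets cached is the ESID portion of the address (esid_data
 * shifted right by 28, i.e. EA[0-35]); switch_slb() shifts it back up by
 * SID_SHIFT (28) to rebuild the effective address it passes to slbie.
 */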
static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

	/*
	 * The allocation bitmaps can become out of synch with the SLB
	 * when the _switch code does slbie when bolting a new stack
	 * segment and it must not be anywhere else in the SLB. This leaves
	 * a kernel allocated entry that is unused in the SLB. With very
	 * large systems or small segment sizes, the bitmaps could slowly
	 * fill with these entries. They will eventually be cleared out
	 * by the round robin allocator in that case, so it's probably not
	 * worth accounting for.
	 */

	/*
	 * SLBs beyond 32 entries are allocated with stab_rr only
	 * POWER7/8/9 have 32 SLB entries, this could be expanded if a
	 * future CPU has more.
	 */
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}
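
/*
 * For example, right after boot slb_used_bitmap is 0b11 (just the two
 * bolted entries), so the first miss is placed at ffz(0b11) == 2. Once
 * every bitmap-tracked slot is in use, stab_rr cycles through
 * SLB_NUM_BOLTED .. mmu_slb_size-1, evicting victims round-robin.
 */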
static long slb_insert_entry(unsigned long ea, unsigned long context,
				unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * slbmte here or the allocation bitmaps could get out of whack with
	 * the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */
	assert_slb_presence(false, ea);
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == LINEAR_MAP_REGION_ID) {

		/* We only support upto MAX_PHYSMEM_BITS */
		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if (ea >= H_VMEMMAP_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if (ea >= H_VMALLOC_END)
			return -EFAULT;

		flags = local_paca->vmalloc_sllp;

	} else if (id == IO_REGION_ID) {

		if (ea >= H_KERN_IO_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;

	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);

	return slb_insert_entry(ea, context, flags, ssize, true);
}
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * consider this as bad access if we take a SLB miss
	 * on an address above addr limit.
	 */
	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}
long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
	unsigned long id = get_region_id(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (unlikely(!(regs->msr & MSR_RI)))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything
	 * that is not bolted. E.g., PACA and global variables are okay,
	 * mm->context stuff is not.
	 *
	 * SLB user faults can access all of kernel memory, but must be
	 * careful not to touch things like IRQ state because it is not
	 * "reconciled" here. The difficulty is that we must use
	 * fast_exception_return to return from kernel SLB faults without
	 * looking at possible non-bolted memory. We could test user vs
	 * kernel faults in the interrupt handler asm and do a full fault,
	 * reconcile, ret_from_except for user faults which would make them
	 * first class kernel code. But for performance it's probably nicer
	 * if they go via fast_exception_return too.
	 */
	if (id >= LINEAR_MAP_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}

void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
		else
			bad_page_fault(regs, ea, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}