/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

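/*
 * Background: on 64-bit Book3S, the SLB (Segment Lookaside Buffer) is a
 * small, software-managed cache of segment translations. Each entry maps
 * an ESID (Effective Segment ID, the top bits of an effective address)
 * to a VSID (Virtual Segment ID) plus protection and page-size flags.
 * SLB misses are handled in software, which is why this file inserts,
 * invalidates and repairs entries by hand with slbmte/slbie/slbia.
 */
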
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};

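/*
 * The two indices above are "bolted" slots: they are installed up front
 * and are never victims of the round-robin replacement performed on SLB
 * miss, so the kernel linear mapping and the kernel stack can always be
 * accessed without taking a recursive SLB miss.
 */
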
extern void slb_allocate(unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

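/*
 * Illustration for the two helpers above (values assumed for the
 * example): for a 256M kernel segment at ea = 0xc000000000000000 with
 * index = LINEAR_INDEX, mk_esid_data() builds the slbmte RB operand:
 * the ESID in the high bits, SLB_ESID_V to mark the entry valid, and
 * the SLB slot number in the low bits. mk_vsid_data() builds the RS
 * operand: the VSID shifted into place, the protection/page-size flags,
 * and the segment size (256M vs 1T) in the SSIZE field.
 */
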
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

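/*
 * A note on why the shadow area matters: on pSeries it is registered
 * with the hypervisor (PHYP), which uses the bolted entries recorded
 * there to re-establish the SLB when the virtual processor is
 * re-dispatched, e.g. after being preempted. The helper below therefore
 * keeps the shadow consistent before touching the real SLB.
 */
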
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because realmode. */
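	/*
	 * (With MSR[IR] = MSR[DR] = 0, fetches and data accesses do not go
	 * through SLB translation, so nothing can consume the new entries
	 * before the eventual return from real mode, which is itself
	 * context-synchronizing.)
	 */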
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}
}

/*
 * Insert the bolted entries into an empty SLB.
 * This is not the same as rebolt because the bolted segments are not
 * changed, just loaded from the shadow area.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	/*
	 * slbia leaves SLB entry 0 intact, so first overwrite slot 0 with
	 * an all-zero (invalid) entry via slbmte, then slbia the rest.
	 */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

void slb_flush_and_rebolt(void)
{
	/*
	 * If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too.
	 */
	unsigned long linear_llp, lflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/*
	 * We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it.
	 */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - kernel stack */
		     "slbmte	%0,%1\n"
		     "isync"
		     :: "r"(ksp_vsid_data),
			"r"(ksp_esid_data)
		     : "memory");

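	/*
	 * Only the bolted entries remain in the SLB now, so the PACA's
	 * slb_cache (the list of user ESIDs that switch_slb() would
	 * otherwise slbie one by one) can be treated as empty again.
	 */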
	get_paca()->slb_cache_ptr = 0;
}

void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

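	/*
	 * slbmfee/slbmfev (SLB Move From Entry ESID/VSID) read back the
	 * two halves of SLB entry i, so the full hardware state can be
	 * captured for later diagnostics.
	 */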
	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

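/*
 * A note on the function below: there is no bolted vmalloc entry in this
 * configuration, so a flush-and-rebolt is sufficient; subsequent SLB
 * misses on the vmalloc region pick up the updated vmalloc_sllp value
 * from the PACA.
 */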
void slb_vmalloc_update(void)
{
	slb_flush_and_rebolt();
}

/*
 * Helper function to compare esids. There are four cases to handle.
 * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID
 *    compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.
 *    This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the
 *    GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
			 ((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

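/*
 * Example for esids_match() (illustrative addresses): with 256M segments,
 * 0x10000000 and 0x10ffffff share ESID 1 and therefore match, while
 * 0x10000000 and 0x20000000 fall in different segments and do not.
 */
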
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		unsigned long slbie_data = 0;
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}

		/* Workaround POWER5 < DD2.1 issue */
		if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
			asm volatile("slbie %0" : : "r" (slbie_data));

		asm volatile("isync" : : : "memory");
	} else {
		struct slb_shadow *p = get_slb_shadow();
		unsigned long ksp_esid_data =
			be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
		unsigned long ksp_vsid_data =
			be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

		asm volatile("isync\n"
			     PPC_SLBIA(1) "\n"
			     "slbmte	%0,%1\n"
			     "isync"
			     :: "r"(ksp_vsid_data),
				"r"(ksp_esid_data));

		asm volatile("isync" : : : "memory");
	}

	get_paca()->slb_cache_ptr = 0;
	copy_mm_to_paca(mm);

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually addi) and cmpldi take a 16-bit immediate
	 * value, and it is situated in the same location in the instruction,
	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
	 * The signedness of the immediate operand differs between the two
	 * instructions, however this code is only ever patching a small value,
	 * much less than 1 << 15, so we can get away with it.
	 * To patch the value we read the existing instruction, clear the
	 * immediate value, and or in our new value, then write the instruction
	 * back.
	 */
	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}

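/*
 * Worked example for patch_slb_encoding() (register and immediate chosen
 * purely for illustration): "li r10,0" encodes as 0x39400000; patching
 * it with immed = 0x400 rewrites it in place to 0x39400400, i.e.
 * "li r10,0x400".
 */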
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

static void insert_slb_entry(unsigned long vsid, unsigned long ea,
			     int bpsize, int ssize)
{
	unsigned long flags, vsid_data, esid_data;
	enum slb_index index;
	int slb_cache_index;

	/*
	 * Interrupts are disabled, so it is safe to access the PACA.
	 */
	VM_WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	index = get_paca()->stab_rr;

	/*
	 * Simple round-robin replacement of SLB entries, starting at
	 * SLB_NUM_BOLTED so the bolted entries are never evicted.
	 */
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;

	get_paca()->stab_rr = index;

	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
		    ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
{
	struct mm_struct *mm = current->mm;
	unsigned long vsid;
	int bpsize;

	/*
	 * We are always above 1TB, hence use the high user segment size.
	 */
	vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
	bpsize = get_slice_psize(mm, ea);
	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
}
528 | ||
529 | void slb_miss_large_addr(struct pt_regs *regs) | |
530 | { | |
531 | enum ctx_state prev_state = exception_enter(); | |
532 | unsigned long ea = regs->dar; | |
533 | int context; | |
534 | ||
535 | if (REGION_ID(ea) != USER_REGION_ID) | |
536 | goto slb_bad_addr; | |
537 | ||
538 | /* | |
539 | * Are we beyound what the page table layout supports ? | |
540 | */ | |
541 | if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE) | |
542 | goto slb_bad_addr; | |
543 | ||
544 | /* Lower address should have been handled by asm code */ | |
545 | if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT)) | |
546 | goto slb_bad_addr; | |
547 | ||
548 | /* | |
549 | * consider this as bad access if we take a SLB miss | |
550 | * on an address above addr limit. | |
551 | */ | |
552 | if (ea >= current->mm->context.slb_addr_limit) | |
553 | goto slb_bad_addr; | |
554 | ||
555 | context = get_ea_context(¤t->mm->context, ea); | |
556 | if (!context) | |
557 | goto slb_bad_addr; | |
558 | ||
559 | handle_multi_context_slb_miss(context, ea); | |
560 | exception_exit(prev_state); | |
561 | return; | |
562 | ||
563 | slb_bad_addr: | |
564 | if (user_mode(regs)) | |
565 | _exception(SIGSEGV, regs, SEGV_BNDERR, ea); | |
566 | else | |
567 | bad_page_fault(regs, ea, SIGSEGV); | |
568 | exception_exit(prev_state); | |
569 | } |