/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/* r3 = faulting address */

	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
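	/*
	 * Region layout (top nibble of the EA): 0x0 is user space, 0xc is
	 * the kernel linear mapping at PAGE_OFFSET, 0xd holds the vmalloc
	 * and ioremap spaces, and 0xf is the virtual memmap when
	 * CONFIG_SPARSEMEM_VMEMMAP is enabled. Everything >= 0xc is
	 * treated as a kernel address below.
	 */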
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the protoVSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space
	 */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
_GLOBAL(slb_miss_kernel_load_linear)
	li	r11,0
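	/* At boot the li above is rewritten with the real flags word:
	 * SLB_VSID_KERNEL or'd with the sllp bits for the linear
	 * mapping's page size (done by the SLB setup code in slb.c).
	 */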
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T
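	/* The feature section is resolved at boot: CPUs without
	 * CPU_FTR_1T_SEGMENT keep the branch to slb_finish_load (256M
	 * segments); on 1T-segment CPUs it is nopped out and we fall
	 * through to the 1T variant.
	 */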

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
_GLOBAL(slb_miss_kernel_load_vmemmap)
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
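	/* Segments in region 0xd past the vmalloc area belong to the
	 * ioremap space and take the boot-patched IO encoding instead.
	 */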
5:
	/* IO mapping */
	_GLOBAL(slb_miss_kernel_load_io)
	li	r11,0
6:
BEGIN_FTR_SECTION
	b	slb_finish_load
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:	/* user address: proto-VSID = context << 15 | ESID. First check
	 * if the address is within the boundaries of the user region
	 */
	srdi.	r9,r10,USER_ESID_BITS
	bne-	8f			/* invalid ea bits set */


	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array. We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	cmpldi	r10,16

	/* Get the slice index * 4 in r11 and matching slice size mask in r9 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	sldi	r11,r10,2
	blt	5f
	ld	r9,PACAHIGHSLICEPSIZE(r13)
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
	andi.	r11,r11,0x3c

5:	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE
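	/* e.g. for a 64K slice, r9 now holds MMU_PAGE_64K; the mulli
	 * scales it by sizeof(struct mmu_psize_def) so it becomes the
	 * byte offset of that page size's entry in mmu_psize_defs[].
	 */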

	/* Now get to the array and obtain the sllp
	 */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

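	/* On CPUs with 1T segments, user addresses at or above 1T take the
	 * 1T path: 0x1000 is the 256M ESID of the 1T boundary (1T >> 28).
	 */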
	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	rldimi	r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
	bge	slb_finish_load_1T
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables. This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */

	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD. In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	rldimi	r10,r9,USER_ESID_BITS,0

	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */


/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	rldimi	r11,r10,SLB_VSID_SHIFT,16	/* combine VSID and flags */
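	/* ASM_VSID_SCRAMBLE multiplies the proto-VSID by VSID_MULTIPLIER
	 * and reduces it modulo VSID_MODULUS to spread consecutive
	 * contexts across the hash space; proto-VSIDs 0 and 0xfffffffff
	 * both come out as the reserved bad VSID 0 mentioned above.
	 */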

	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin. Previously we tried to find a
	 * free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/*
	 * On iSeries, the "bolted" stack segment can be cast out on
	 * shared processor switch so we need to check for a miss on
	 * it and restore it to the right slot.
	 */
	ld	r9,PACAKSAVE(r13)
	clrrdi	r9,r9,28
	clrrdi	r3,r3,28
	li	r10,SLB_NUM_BOLTED-1	/* Stack goes in last bolted slot */
	cmpld	r9,r3
	beq	3f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

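	/* Round-robin victim selection: PACASTABRR remembers the last
	 * slot used. The cmpldi immediate below is patched at boot with
	 * this CPU's real SLB size, and the counter wraps back to
	 * SLB_NUM_BOLTED so bolted entries are never evicted.
	 */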
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
_GLOBAL(slb_compare_rr_to_size)
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)

3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r3 |= SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
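	/* The ESIDs of user entries are remembered in paca->slb_cache so
	 * the context-switch code can invalidate just those entries
	 * instead of flushing the whole SLB; leaving the pointer past
	 * SLB_CACHE_ENTRIES signals that a full flush is needed instead.
	 */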
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr

/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 * We assume legacy iSeries will never have 1T segments.
 *
 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,40-28		/* get 1T ESID */
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,16	/* combine VSID and flags */
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b