/*
 *  $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
	.globl mmu_hash_lock
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
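/*
 * hash_page_sync simply acquires and then immediately releases
 * mmu_hash_lock; by the time it returns, any hash_page critical
 * section that was in progress on another CPU has completed.
 */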
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)
	blr
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 */
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */
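	/*
	 * Roughly equivalent C sketch of the loop above (illustrative
	 * names only):
	 *
	 *	do {
	 *		old = *ptep;			// lwarx
	 *		if (access & ~old)		// permission violation?
	 *			return to caller;
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));	// stwcx.
	 */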

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

	/*
	 * htab_reloads counts the number of times we have to fault an
	 * HPTE into the hash table.  This should only happen after a
	 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
	 * Where a page is faulted into a process's address space,
	 * update_mmu_cache gets called to put the HPTE into the hash table
	 * and those are counted as preloads rather than reloads.
	 */
	addis	r8,r7,htab_reloads@ha
	lwz	r3,htab_reloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_reloads@l(r8)

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
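	/*
	 * Illustrative only: the three instructions above compute roughly
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) & 0xf) * 0x111;
	 *
	 * and create_hpte later keeps only the low 24 bits of this value.
	 */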

#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
	rlwimi	r8,r4,22,20,29		/* r8 = address of the linux pte */
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

	addis	r8,r7,htab_preloads@ha
	lwz	r3,htab_preloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
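	/*
	 * For reference (architectural PP encodings, not specific to this
	 * file): PP=0 gives supervisor read/write and no user access,
	 * PP=2 gives read/write to both, PP=3 gives read-only to both.
	 * So kernel pages get PP=0, and user pages get PP=2 only once they
	 * are both writable and dirty, PP=3 otherwise.
	 */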

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */
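	/*
	 * Illustrative only: after the patched instructions above, r3 is
	 * roughly
	 *
	 *	Hash_base + (((vsid ^ (va >> 12)) << LG_PTEG_SIZE) & Hash_msk)
	 *
	 * plus the r7 relocation offset, i.e. the address of the primary PTEG.
	 */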

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */
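	/*
	 * Illustrative only: the two patched xor instructions above flip
	 * the Hash_msk bits of the PTEG address, so the secondary PTEG is
	 * at (primary address) ^ Hash_msk, i.e. it uses the ones-complement
	 * of the primary hash.
	 */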

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
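	/*
	 * next_slot advances by PTE_SIZE and wraps modulo 8*PTE_SIZE, so
	 * evictions round-robin through the eight slots of the primary PTEG.
	 */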

	/* update counter of evicted pages */
	addis	r6,r7,htab_evicts@ha
	lwz	r3,htab_evicts@l(r6)
	addi	r3,r3,1
	stw	r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
	.globl primary_pteg_full
primary_pteg_full:
	.space	4
	.globl htab_hash_searches
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29		/* r5 = address of the linux pte */
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,4
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */
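	/* Same context/ESID skew computation as in add_hash_page above. */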

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r8) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4			/* advance to next pte */
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr