/*
 * arch/ppc/kernel/hashtable.S
 *
 * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_SMP
	.comm	mmu_hash_lock,4
#endif /* CONFIG_SMP */

/*
 * Sync CPUs with hash_page taking & releasing the hash
 * table lock
 */
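/*
 * A C-like sketch of the handshake below, for orientation only;
 * test_and_set() is shorthand for the lwarx/stwcx. loop, not a
 * real kernel API, and the token stored in the lock word here
 * is 0x0fff0000:
 *
 *	while (test_and_set(&mmu_hash_lock, 0x0fff0000) != 0)
 *		;		-- spin until the lock word is free
 *	isync();		-- acquire barrier
 *	eieio();		-- order prior stores before release
 *	mmu_hash_lock = 0;	-- release
 */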
#ifdef CONFIG_SMP
	.text
_GLOBAL(hash_page_sync)
	lis	r8,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
	eieio
	li	r0,0
	stw	r0,0(r8)
	blr
#endif

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r8, ctr, lr.
 */
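/*
 * In outline, the fast path below is roughly (C-like sketch only;
 * the helper names are illustrative, not real functions):
 *
 *	pte = *walk_page_tables(va);		-- pgdir, then pte page
 *	if (access & ~pte)
 *		return;				-- not permitted
 *	pte |= _PAGE_ACCESSED | _PAGE_HASHPTE;	-- done atomically
 *	if (access & _PAGE_RW)
 *		pte |= _PAGE_DIRTY;
 *	create_hpte();				-- insert into hash table
 *	fast_exception_return();
 */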
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_PPC64BRIDGE
	mfmsr	r0
	clrldi	r0,r0,1			/* make sure it's in 32-bit mode */
	MTMSRD(r0)
	isync
#endif
	tophys(r7,0)			/* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
	addis	r8,r7,mmu_hash_lock@h
	ori	r8,r8,mmu_hash_lock@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	lwz	r5,PGDIR(r8)		/* virt page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:	add	r5,r5,r7		/* convert to phys addr */
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 */
retry:
	lwarx	r6,0,r8			/* get linux-style pte */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

/*
 * htab_reloads counts the number of times we have to fault an
 * HPTE into the hash table.  This should only happen after a
 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
 * Where a page is faulted into a process's address space,
 * update_mmu_cache gets called to put the HPTE into the hash table
 * and those are counted as preloads rather than reloads.
 */
	addis	r8,r7,htab_reloads@ha
	lwz	r3,htab_reloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_reloads@l(r8)

#ifdef CONFIG_SMP
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r7,GPR7(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	addis	r8,r7,mmu_hash_lock@ha
	li	r0,0
	stw	r0,mmu_hash_lock@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
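/*
 * A typical C-side call looks roughly like this (cf. the
 * update_mmu_cache() preload path):
 *
 *	add_hash_page(mm->context, address, pmd_val(*pmd));
 */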
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
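	/*
	 * Worked example: context 5 with va 0xb0001000 gives
	 * ESID 0xb, so
	 *	VSID = 5 * (897*16) + 0xb * 0x111
	 *	     = 0x11850 + 0xbbb = 0x1240b
	 * (create_hpte then uses only the low 24 bits).
	 */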

#ifdef CONFIG_SMP
	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	tophys(r7,0)

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
	rlwimi	r8,r4,22,20,29
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

	addis	r8,r7,htab_preloads@ha
	lwz	r3,htab_preloads@l(r8)
	addi	r3,r3,1
	stw	r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
	eieio
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r10
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 * -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
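/*
 * With these example values, the table has 2^12 = 4096 PTEGs of
 * 64 bytes each (hence 256kB), and Hash_msk = 4095 * 64 = 0x3ffc0.
 */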

#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE	16
#define PTEG_SIZE	128
#define LG_PTEG_SIZE	7
#define LDPTEu		ldu
#define STPTE		std
#define CMPPTE		cmpd
#define PTE_H		2
#define PTE_V		1
#define TST_V(r)	andi. r,r,PTE_V
#define SET_V(r)	ori r,r,PTE_V
#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r0,r5,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe14		/* clear out reserved bits and M */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)

	/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r5,r3,12		/* shift vsid into position */
	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r5)			/* set V (valid) bit */

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(hash_page_patch_A)
	addis	r0,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

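	/*
	 * In effect, the patched instructions above compute
	 *	r3 = Hash_base + (((VSID ^ (va >> 12)) << LG_PTEG_SIZE)
	 *			  & Hash_msk)
	 * (plus the tophys offset in r7), i.e. the address of the
	 * primary PTE group for this page.
	 */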
	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	addis	r4,r7,htab_hash_searches@ha
	lwz	r6,htab_hash_searches@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6,htab_hash_searches@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-PTE_SIZE
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_B)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	addis	r4,r7,primary_pteg_full@ha
	lwz	r6,primary_pteg_full@l(r4)
	addi	r6,r6,1
	stw	r6,primary_pteg_full@l(r4)

	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
_GLOBAL(hash_page_patch_C)
	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,PTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 */
	addis	r4,r7,next_slot@ha
	lwz	r6,next_slot@l(r4)
	addi	r6,r6,PTE_SIZE
	andi.	r6,r6,7*PTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
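	/*
	 * The andi. above wraps the offset at 7*PTE_SIZE, so
	 * successive evictions rotate round-robin through all
	 * eight slots of the PTEG.
	 */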

	/* update counter of evicted pages */
	addis	r6,r7,htab_evicts@ha
	lwz	r3,htab_evicts@l(r6)
	addi	r3,r3,1
	stw	r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
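/*
 * In effect, the store sequence below is:
 *	pte->hi = new_hi & ~V;	sync; TLBSYNC;	-- entry invalid
 *	pte->lo = new_lo;	sync;
 *	pte->hi = new_hi;			-- V set, entry valid again
 */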
found_empty:
found_slot:
	CLR_V(r5,r0)			/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)		/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync				/* make sure pte updates get to memory */
	blr

	.comm	next_slot,4
	.comm	primary_pteg_full,4
	.comm	htab_hash_searches,4

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
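/*
 * A typical C-side call looks roughly like this (cf. the
 * flush_tlb_* routines):
 *
 *	flush_hash_pages(mm->context, va, pmd_val(*pmd), 1);
 */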
_GLOBAL(flush_hash_pages)
	tophys(r7,0)

	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
	rlwimi	r5,r4,22,20,29
1:	lwz	r0,0(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,4
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
#ifndef CONFIG_PPC64BRIDGE
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
	sldi	r11,r3,12		/* shift vsid into position */
	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	addis	r9,r7,mmu_hash_lock@ha
	addi	r9,r9,mmu_hash_lock@l
	rlwinm	r8,r1,0,0,18
	add	r8,r8,r7
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed. -- paulus.
	 */
33:	lwarx	r8,0,r5			/* fetch the pte */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	/* Get the address of the primary PTE group in the hash table (r3) */
_GLOBAL(flush_hash_patch_A)
	addis	r8,r7,Hash_base@h	/* base address of hash table */
	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-PTE_SIZE
1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
_GLOBAL(flush_hash_patch_B)
	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-PTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,PTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,4			/* advance to next pte */
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr