/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 * This file contains low-level assembler routines for managing
 * the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 * hash table, so this file is not used on them.)
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
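	/*
	 * The sequence above is a plain lwarx/stwcx. spin lock on
	 * mmu_hash_lock, taken with the MMU off (hence -PAGE_OFFSET to
	 * form a physical address).  Roughly, as a hedged C-like sketch
	 * (helper names here are illustrative, not the kernel's API):
	 *
	 *	while (cmpxchg(&mmu_hash_lock, 0, 0x0fff0000) != 0)
	 *		while (READ_ONCE(mmu_hash_lock) != 0)
	 *			;		// spin without a reservation
	 *	isync();			// acquire barrier
	 *
	 * Re-reading the lock word with a plain load (label 11) before
	 * retrying the lwarx avoids bouncing the reservation between CPUs.
	 */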
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	.Lhash_page_out		/* return if no mapping */
#else
	/*
	 * XXX it seems like the 601 will give a machine fault on the
	 * rfi if its alignment is wrong (bottom 4 bits of address are
	 * 8 or 0xc) and we have had a not-taken conditional branch
	 * to the address following the rfi.
	 */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
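	/*
	 * As a hedged C-like sketch of the update loop below (pte_t and
	 * the helper names are illustrative only):
	 *
	 *	do {
	 *		old = load_reserved(ptep);		// lwarx
	 *		if (access & ~old)			// permission violation?
	 *			return;				// (unlock first on SMP)
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));	// stwcx., retry on race
	 */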
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
.Lretry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	.Lhash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	.Lretry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
#ifndef CONFIG_VMAP_STACK
	mfctr	r0
	stw	r0,_CTR(r11)
#endif
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

#ifdef CONFIG_VMAP_STACK
	b	fast_hash_page_return
#else
	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return
#endif

#ifdef CONFIG_SMP
.Lhash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */
_ASM_NOKPROBE_SYMBOL(hash_page)

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
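	/*
	 * Equivalently, as a hedged sketch (variable names illustrative):
	 *
	 *	vsid = (context * 0x3810 + (va >> 28) * 0x111) & 0xffffff;
	 *
	 * since 897*16 == 0x3810.  E.g. context 1, va 0x10000000 gives
	 * (0x3810 + 0x111) & 0xffffff == 0x3921; the 24-bit trim happens
	 * in create_hpte when it builds the HPTE high word.
	 */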

#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync
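	/*
	 * The two rlwinm instructions above are just bit clears; as a
	 * hedged C-like sketch, the whole sequence is approximately:
	 *
	 *	msr = mfmsr();
	 *	mtmsr(msr & ~(MSR_EE | MSR_DR));   // IRQs and data translation off
	 *	isync();
	 */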

#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr
_ASM_NOKPROBE_SYMBOL(add_hash_page)

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE).  r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
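
/*
 * Worked example (a sketch using the example values above): with
 * Hash_bits = 12 there are 2^12 = 4096 PTEGs of 64 bytes each, i.e.
 * a 256kB table, and Hash_msk = 4095 * 64 = 0x3ffc0.  HASH_LEFT and
 * HASH_RIGHT then evaluate to 14 and 25, so the patched rotate-and-
 * mask instructions below deposit the hash into IBM bits 14..25 of
 * the PTEG address, i.e. on a 64-byte (PTEG_SIZE) boundary within
 * the table.
 */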

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
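	/*
	 * A hedged C-like sketch of the PP computation above, restating
	 * the comment on the andc (names illustrative):
	 *
	 *	writable = (pte & _PAGE_RW) && (pte & _PAGE_DIRTY);
	 *	pp = (pte & _PAGE_USER) ? (writable ? 1 : 3) : 0;
	 *
	 * A clean (not yet dirty) user page thus gets the read-only
	 * encoding, so the first write faults back into hash_page,
	 * which sets _PAGE_DIRTY and upgrades PP; that is how dirty
	 * tracking works on hash MMUs.
	 */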
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

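	/*
	 * The three patched instructions above compute, as a hedged
	 * C-like sketch (names illustrative):
	 *
	 *	hash = (vsid ^ (va >> 12)) & (n_ptegs - 1);
	 *	pteg = hash_base_phys + hash * PTEG_SIZE;
	 *
	 * where n_ptegs = 1 << Hash_bits; the masking and the *64 are
	 * folded into the HASH_LEFT/HASH_RIGHT rotate-and-mask fields.
	 */
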
	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	.Lfound_slot
	xori	r5,r5,PTE_H		/* clear H bit again */
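	/*
	 * With the patched Hash_msk, the xoris/xori pair flips every
	 * hash bit, so the secondary PTEG address is (as a sketch)
	 * pteg2 = pteg1 ^ Hash_msk: the architected secondary hash is
	 * the ones' complement of the primary hash within the table.
	 * The extra -HPTE_SIZE just pre-biases r4 for the LDPTEu
	 * auto-increment.
	 */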

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	.Lfound_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	.Lfound_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */

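	/*
	 * A hedged C-like sketch of the eviction below (names
	 * illustrative): next_slot is a global round-robin cursor over
	 * the 8 slots of a PTEG,
	 *
	 *	next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 *	slot = pteg + next_slot;
	 *	if (hpte_rpn(slot) < __pa(etext))
	 *		advance to the next slot;   // don't evict kernel text
	 *
	 * so successive evictions cycle through slots 0..7, skipping
	 * any slot whose physical page lies below _etext.
	 */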
1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)		/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l			/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6			/* compare and try again */
	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
.Lfound_empty:
	STPTE	r5,0(r4)
.Lfound_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
.Lfound_empty:
.Lfound_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr
_ASM_NOKPROBE_SYMBOL(create_hpte)

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
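/*
 * A hedged C-like sketch of the overall flow below (names
 * illustrative):
 *
 *	for (i = 0; i < count; i++, va += PAGE_SIZE, ptep++) {
 *		if (!(*ptep & _PAGE_HASHPTE))
 *			continue;
 *		// atomically clear _PAGE_HASHPTE in *ptep,
 *		// find the matching HPTE in the primary or secondary
 *		// PTEG and clear its V bit, then
 *		tlbie(va);
 *	}
 */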
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed. -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
EXPORT_SYMBOL(flush_hash_pages)
_ASM_NOKPROBE_SYMBOL(flush_hash_pages)

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbie)

/*
 * Flush the entire TLB. 603/603e only
 */
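/*
 * A hedged note on the loop below: on the 603/603e the software-
 * loaded TLBs are set-associative and indexed by low-order effective
 * page-address bits, and tlbie is assumed here to invalidate the whole
 * set matching its operand, so 32 tlbies over 32 consecutive pages
 * cover every set.  As a C-like sketch (names illustrative):
 *
 *	for (ea = KERNELBASE, i = 0; i < 32; i++, ea += 0x1000)
 *		tlbie(ea);
 */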
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
#endif /* CONFIG_SMP */
	li	r5, 32
	lis	r4, KERNELBASE@h
	mtctr	r5
	sync
0:	tlbie	r4
	addi	r4, r4, 0x1000
	bdnz	0b
	sync
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#endif /* CONFIG_SMP */
	blr
_ASM_NOKPROBE_SYMBOL(_tlbia)