/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

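/*
 * The HPTE "lock" is a software lock held in a reserved bit of the
 * HPTE's first doubleword. The bitops below operate on that doubleword
 * as it sits in memory; HPTEs are stored big-endian, so on little-endian
 * kernels the byte-swapped load moves the bit index up by 56.
 */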
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

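/* Serializes broadcast tlbie on CPUs without MMU_FTR_LOCKLESS_TLBIE. */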
DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /*
         * We need 14 to 65 bits of va for a tlbie of 4K page.
         * With vpn we ignore the lower VPN_SHIFT bits already.
         * And top two bits are already ignored because we can
         * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
         * of 12.
         */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
        unsigned long va;
        unsigned int penc;
        unsigned long sllp;

        /* VPN_SHIFT can be at most 12 */
        va = vpn << VPN_SHIFT;
        /*
         * clear top 16 bits of 64 bit va, non SLS segment
         * Older versions of the architecture (2.02 and earlier) require the
         * masking of the top 16 bits.
         */
        if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
                va &= ~(0xffffULL << 48);

        switch (psize) {
        case MMU_PAGE_4K:
                /* clear out bits after (52) [0....52.....63] */
                va &= ~((1ul << (64 - 52)) - 1);
                va |= ssize << 8;
                sllp = get_sllp_encoding(apsize);
                va |= sllp << 5;
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        default:
                /* We need 14 to 14 + i bits of va */
                penc = mmu_psize_defs[psize].penc[apsize];
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
                /*
                 * AVAL bits:
                 * We don't need all the bits, but the rest of the bits
                 * must be ignored by the processor.
                 * vpn covers up to 65 bits of va (0...65) and we need
                 * bits 58..64 of va.
                 */
                va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
                             : : "r" (va), "i" (CPU_FTR_ARCH_206)
                             : "memory");
                break;
        }
}

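/*
 * Flush one translation: use a local tlbiel when the invalidation is
 * CPU-local, the page size supports it and no CXL context is active;
 * otherwise fall back to a broadcast tlbie, taking native_tlbie_lock
 * on CPUs that cannot handle concurrent tlbies.
 */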
static inline void tlbie(unsigned long vpn, int psize, int apsize,
                         int ssize, int local)
{
        unsigned int use_local;
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

        if (use_local)
                use_local = mmu_psize_defs[psize].tlbiel;
        if (lock_tlbie && !use_local)
                raw_spin_lock(&native_tlbie_lock);
        asm volatile("ptesync": : :"memory");
        if (use_local) {
                __tlbiel(vpn, psize, apsize, ssize);
                asm volatile("ptesync": : :"memory");
        } else {
                __tlbie(vpn, psize, apsize, ssize);
                asm volatile("eieio; tlbsync; ptesync": : :"memory");
        }
        if (lock_tlbie && !use_local)
                raw_spin_unlock(&native_tlbie_lock);
}

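/*
 * Take the per-HPTE software lock by atomically setting HPTE_LOCK_BIT,
 * spinning with plain reads (and cpu_relax) while another CPU holds it
 * so we don't hammer the cacheline with atomic ops.
 */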
static inline void native_lock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
                        break;
                while (test_bit(HPTE_LOCK_BIT, word))
                        cpu_relax();
        }
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
        unsigned long *word = (unsigned long *)&hptep->v;

        clear_bit_unlock(HPTE_LOCK_BIT, word);
}

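/*
 * Insert an HPTE into the given 8-entry group. Returns the slot used,
 * with bit 3 set if the entry went into the secondary hash bucket, or
 * -1 if the group is full and the caller must evict via hpte_remove.
 */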
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                               unsigned long pa, unsigned long rflags,
                               unsigned long vflags, int psize, int apsize, int ssize)
{
        struct hash_pte *hptep = htab_address + hpte_group;
        unsigned long hpte_v, hpte_r;
        int i;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
                        " rflags=%lx, vflags=%lx, psize=%d)\n",
                        hpte_group, vpn, pa, rflags, vflags, psize);
        }

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }

                hptep++;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
        hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

        if (!(vflags & HPTE_V_BOLTED)) {
                DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
                        i, hpte_v, hpte_r);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
                hpte_v = hpte_old_to_new_v(hpte_v);
        }

        hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
        hptep->v = cpu_to_be64(hpte_v);

        __asm__ __volatile__ ("ptesync" : : : "memory");

        return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

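/*
 * Evict a pseudo-randomly chosen, non-bolted entry from a full group.
 * Deliberately does no tlbie: callers assume the old translation stays
 * "valid" and flush the TLB themselves (see native_hpte_updatepp).
 */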
static long native_hpte_remove(unsigned long hpte_group)
{
        struct hash_pte *hptep;
        int i;
        int slot_offset;
        unsigned long hpte_v;

        DBG_LOW("    remove(group=%lx)\n", hpte_group);

        /* pick a random entry to start at */
        slot_offset = mftb() & 0x7;

        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
                hpte_v = be64_to_cpu(hptep->v);

                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
                        native_unlock_hpte(hptep);
                }

                slot_offset++;
                slot_offset &= 0x7;
        }

        if (i == HPTES_PER_GROUP)
                return -1;

        /* Invalidate the hpte. NOTE: this also unlocks it */
        hptep->v = 0;

        return i;
}

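/*
 * Update the protection bits of an existing HPTE. Returns 0 on success
 * or -1 if the entry no longer matches @vpn (e.g. it was evicted), in
 * which case the caller must re-insert the translation.
 */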
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                 unsigned long vpn, int bpsize,
                                 int apsize, int ssize, unsigned long flags)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v, want_v;
        int ret = 0, local = 0;

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);

        DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
                vpn, want_v & HPTE_V_AVPN, slot, newpp);

        hpte_v = be64_to_cpu(hptep->v);
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
        /*
         * We always need to invalidate the TLB because hpte_remove doesn't
         * do a tlb invalidate. If a hash bucket gets full, we "evict" a
         * more or less random entry from it. When we do that we don't
         * invalidate the TLB (hpte_remove) because we assume the old
         * translation is still technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                DBG_LOW(" -> miss\n");
                ret = -1;
        } else {
                native_lock_hpte(hptep);
                /* recheck with locks held */
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
                if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
                             !(hpte_v & HPTE_V_VALID))) {
                        ret = -1;
                } else {
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                                ~(HPTE_R_PPP | HPTE_R_N)) |
                                               (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
        }

        if (flags & HPTE_LOCAL_UPDATE)
                local = 1;
        /*
         * Ensure it is out of the tlb too if it is not a nohpte fault
         */
        if (!(flags & HPTE_NOHPTE_UPDATE))
                tlbie(vpn, bpsize, apsize, ssize, local);

        return ret;
}

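/*
 * Return the slot of a valid HPTE matching @vpn, or -1 if none. Only
 * the primary bucket is searched, which suffices for bolted mappings.
 */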
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
        struct hash_pte *hptep;
        unsigned long hash;
        unsigned long i;
        long slot;
        unsigned long want_v, hpte_v;

        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);

        /* Bolted mappings are only ever in the primary group */
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
                        return slot;
                ++slot;
        }

        return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
                                       int psize, int ssize)
{
        unsigned long vpn;
        unsigned long vsid;
        long slot;
        struct hash_pte *hptep;

        vsid = get_kernel_vsid(ea, ssize);
        vpn = hpt_vpn(ea, vsid, ssize);

        slot = native_hpte_find(vpn, psize, ssize);
        if (slot == -1)
                panic("could not find page to bolt\n");
        hptep = htab_address + slot;

        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
                                ~(HPTE_R_PPP | HPTE_R_N)) |
                               (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. A bolted entry's base and
         * actual page sizes are always the same.
         */
        tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
                                   int bpsize, int apsize, int ssize, int local)
{
        struct hash_pte *hptep = htab_address + slot;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;

        local_irq_save(flags);

        DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
        hpte_v = be64_to_cpu(hptep->v);
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

        /*
         * We always need to invalidate the TLB because hpte_remove doesn't
         * do a tlb invalidate. If a hash bucket gets full, we "evict" a
         * more or less random entry from it. When we do that we don't
         * invalidate the TLB (hpte_remove) because we assume the old
         * translation is still technically "valid".
         */
        if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                native_unlock_hpte(hptep);
        else
                /* Invalidate the hpte. NOTE: this also unlocks it */
                hptep->v = 0;

        /* Invalidate the TLB */
        tlbie(vpn, bpsize, apsize, ssize, local);

        local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
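/*
 * Invalidate every HPTE backing one hugepage. hpte_slot_array records,
 * for each base-page index under the PMD, whether an HPTE was inserted
 * and which hash bucket and slot it landed in.
 */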
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        int i;
        struct hash_pte *hptep;
        int actual_psize = MMU_PAGE_16M;
        unsigned int max_hpte_count, valid;
        unsigned long flags, s_addr = addr;
        unsigned long hpte_v, want_v, shift;
        unsigned long hidx, vpn = 0, hash, slot;

        shift = mmu_psize_defs[psize].shift;
        max_hpte_count = 1U << (PMD_SHIFT - shift);

        local_irq_save(flags);
        for (i = 0; i < max_hpte_count; i++) {
                valid = hpte_valid(hpte_slot_array, i);
                if (!valid)
                        continue;
                hidx = hpte_hash_index(hpte_slot_array, i);

                /* get the vpn */
                addr = s_addr + (i * (1ul << shift));
                vpn = hpt_vpn(addr, vsid, ssize);
                hash = hpt_hash(vpn, shift, ssize);
                if (hidx & _PTEIDX_SECONDARY)
                        hash = ~hash;

                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += hidx & _PTEIDX_GROUP_IX;

                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
                hpte_v = be64_to_cpu(hptep->v);
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
                        native_unlock_hpte(hptep);
                else
                        /* Invalidate the hpte. NOTE: this also unlocks it */
                        hptep->v = 0;
                /*
                 * We need a tlb invalidate for each subpage address: the
                 * tlbie instruction compares the VA in each TLB entry
                 * with the VA specified here.
                 */
                tlbie(vpn, psize, actual_psize, ssize, local);
        }
        local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
                                       int psize, int ssize, int local)
{
        WARN(1, "%s called without THP support\n", __func__);
}
#endif

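/*
 * Decode an HPTE back into (vpn, base/actual page size, segment size).
 * The AVPN only keeps VA bits above bit 23, so the low-order page-index
 * bits are recovered by inverting the hash function: for 256M segments
 * hash = vsid ^ (seg_off >> shift), hence vpi = (vsid ^ pteg) & mask.
 */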
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
        unsigned long avpn, pteg, vpi;
        unsigned long hpte_v = be64_to_cpu(hpte->v);
        unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
        unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
                hpte_r = hpte_new_to_old_r(hpte_r);
        }
        if (!(hpte_v & HPTE_V_LARGE)) {
                size = MMU_PAGE_4K;
                a_size = MMU_PAGE_4K;
        } else {
                size = hpte_page_sizes[lp] & 0xf;
                a_size = hpte_page_sizes[lp] >> 4;
        }
        /* This works for all page sizes, and for 256M and 1T segments */
        *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
        shift = mmu_psize_defs[size].shift;

        avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
        pteg = slot / HPTES_PER_GROUP;
        if (hpte_v & HPTE_V_SECONDARY)
                pteg = ~pteg;

        switch (*ssize) {
        case MMU_SEGSIZE_256M:
                /* We only have 28 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1f) << 23;
                vsid = avpn >> 5;
                /* We can find more bits from the pteg value */
                if (shift < 23) {
                        vpi = (vsid ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        case MMU_SEGSIZE_1T:
                /* We only have 40 - 23 bits of seg_off in avpn */
                seg_off = (avpn & 0x1ffff) << 23;
                vsid = avpn >> 17;
                if (shift < 23) {
                        vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
                        seg_off |= vpi << shift;
                }
                *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
                break;
        default:
                *vpn = size = 0;
        }
        *psize = size;
        *apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they take an ISI), and we are the only one left. We rely on
 * our kernel mapping being 0xC0's and the hardware ignoring those two
 * real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
        unsigned long vpn = 0;
        unsigned long slot, slots;
        struct hash_pte *hptep = htab_address;
        unsigned long hpte_v;
        unsigned long pteg_count;
        int psize, apsize, ssize;

        pteg_count = htab_hash_mask + 1;

        slots = pteg_count * HPTES_PER_GROUP;

        for (slot = 0; slot < slots; slot++, hptep++) {
                /*
                 * we could lock the pte here, but we are the only cpu
                 * running, right?  and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
                hpte_v = be64_to_cpu(hptep->v);

                /*
                 * Call __tlbie() here rather than tlbie() since we can't
                 * take the native_tlbie_lock.
                 */
                if (hpte_v & HPTE_V_VALID) {
                        hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
                        hptep->v = 0;
                        __tlbie(vpn, psize, apsize, ssize);
                }
        }

        asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush; we batch the tlbie's to avoid taking and
 * releasing the lock all the time.
 */
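/*
 * Two passes: first invalidate every matching HPTE under its per-entry
 * lock, then issue the whole batch of tlbiel/tlbie instructions inside
 * a single ptesync/tlbsync bracket.
 */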
static void native_flush_hash_range(unsigned long number, int local)
{
        unsigned long vpn;
        unsigned long hash, index, hidx, shift, slot;
        struct hash_pte *hptep;
        unsigned long hpte_v;
        unsigned long want_v;
        unsigned long flags;
        real_pte_t pte;
        struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
        unsigned int use_local;

        use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
                    mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

        local_irq_save(flags);

        for (i = 0; i < number; i++) {
                vpn = batch->vpn[i];
                pte = batch->pte[i];

                pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
                        hash = hpt_hash(vpn, shift, ssize);
                        hidx = __rpte_to_hidx(pte, index);
                        if (hidx & _PTEIDX_SECONDARY)
                                hash = ~hash;
                        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                        slot += hidx & _PTEIDX_GROUP_IX;
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
                        hpte_v = be64_to_cpu(hptep->v);
                        if (cpu_has_feature(CPU_FTR_ARCH_300))
                                hpte_v = hpte_new_to_old_v(hpte_v,
                                                be64_to_cpu(hptep->r));
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
                        else
                                hptep->v = 0;
                } pte_iterate_hashed_end();
        }

        if (use_local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbiel(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                asm volatile("ptesync":::"memory");
        } else {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);

                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
                        pte = batch->pte[i];

                        pte_iterate_hashed_subpages(pte, psize,
                                                    vpn, index, shift) {
                                __tlbie(vpn, psize, psize, ssize);
                        } pte_iterate_hashed_end();
                }
                asm volatile("eieio; tlbsync; ptesync":::"memory");

                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        }

        local_irq_restore(flags);
}

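/*
 * ISA v3.00 hash: program PATB1, the second doubleword of the
 * partition-table entry, with the process table location (VSID),
 * page-size encoding (sllp) and table size.
 */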
static int native_register_proc_table(unsigned long base, unsigned long page_size,
                                      unsigned long table_size)
{
        unsigned long patb1 = base << 25; /* VSID */

        patb1 |= (page_size << 5); /* sllp */
        patb1 |= table_size;

        partition_tb->patb1 = cpu_to_be64(patb1);
        return 0;
}

void __init hpte_init_native(void)
{
        mmu_hash_ops.hpte_invalidate = native_hpte_invalidate;
        mmu_hash_ops.hpte_updatepp = native_hpte_updatepp;
        mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
        mmu_hash_ops.hpte_insert = native_hpte_insert;
        mmu_hash_ops.hpte_remove = native_hpte_remove;
        mmu_hash_ops.hpte_clear_all = native_hpte_clear;
        mmu_hash_ops.flush_hash_range = native_flush_hash_range;
        mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate;

        if (cpu_has_feature(CPU_FTR_ARCH_300))
                register_process_table = native_register_proc_table;
}