Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * native hashtable management. | |
3 | * | |
4 | * SMP scalability work: | |
5 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License | |
9 | * as published by the Free Software Foundation; either version | |
10 | * 2 of the License, or (at your option) any later version. | |
11 | */ | |
3c726f8d BH |
12 | |
13 | #undef DEBUG_LOW | |
14 | ||
1da177e4 LT |
15 | #include <linux/spinlock.h> |
16 | #include <linux/bitops.h> | |
beacc6da | 17 | #include <linux/of.h> |
1da177e4 LT |
18 | #include <linux/threads.h> |
19 | #include <linux/smp.h> | |
20 | ||
1da177e4 LT |
21 | #include <asm/machdep.h> |
22 | #include <asm/mmu.h> | |
23 | #include <asm/mmu_context.h> | |
24 | #include <asm/pgtable.h> | |
25 | #include <asm/tlbflush.h> | |
26 | #include <asm/tlb.h> | |
27 | #include <asm/cputable.h> | |
3c726f8d | 28 | #include <asm/udbg.h> |
71bf08b6 | 29 | #include <asm/kexec.h> |
60dbf438 | 30 | #include <asm/ppc-opcode.h> |
3c726f8d | 31 | |
4c6d9acc IM |
32 | #include <misc/cxl.h> |
33 | ||
3c726f8d BH |
34 | #ifdef DEBUG_LOW |
35 | #define DBG_LOW(fmt...) udbg_printf(fmt) | |
36 | #else | |
37 | #define DBG_LOW(fmt...) | |
38 | #endif | |
1da177e4 | 39 | |
12f04f2b | 40 | #ifdef __BIG_ENDIAN__ |
1da177e4 | 41 | #define HPTE_LOCK_BIT 3 |
12f04f2b AB |
42 | #else |
43 | #define HPTE_LOCK_BIT (56+3) | |
44 | #endif | |
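The HPTE itself is always kept in the architected big-endian layout, while the lock bit below is manipulated with the generic bitops, which operate on a native-endian unsigned long. When a little-endian CPU loads those eight bytes, each byte keeps its internal bit offsets but moves to the mirrored byte position, which is why the same bit sits at index 3 on big-endian kernels and at 56 + 3 on little-endian ones. A small illustrative helper (my sketch, not part of this file) expressing that index mapping:

```c
/*
 * Illustration only: map a bit index (LSB = 0) of a big-endian-stored
 * u64 to the index the generic bitops see when the same eight bytes are
 * loaded on a little-endian CPU. For be_bit = 3 this gives 56 + 3,
 * matching the two HPTE_LOCK_BIT definitions above.
 */
static inline unsigned int be64_bit_to_le_bit(unsigned int be_bit)
{
	unsigned int byte = be_bit / 8;	/* which byte of the value holds the bit */
	unsigned int off  = be_bit % 8;	/* bit offset inside that byte */

	return (7 - byte) * 8 + off;	/* mirrored byte position, same offset */
}
```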
1da177e4 | 45 | |
9e368f29 | 46 | DEFINE_RAW_SPINLOCK(native_tlbie_lock); |
1da177e4 | 47 | |
b1022fbd | 48 | static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) |
3c726f8d | 49 | { |
5524a27d | 50 | unsigned long va; |
3c726f8d | 51 | unsigned int penc; |
de640959 | 52 | unsigned long sllp; |
3c726f8d | 53 | |
5524a27d AK |
54 | /* |
55 | * We need 14 to 65 bits of va for a tlbie of a 4K page. | |
56 | * With vpn we ignore the lower VPN_SHIFT bits already. | |
57 | * And the top two bits are already ignored because we can | |
58 | * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT | |
59 | * of 12. | |
60 | */ | |
61 | va = vpn << VPN_SHIFT; | |
62 | /* | |
63 | * clear top 16 bits of 64bit va, non SLS segment | |
64 | * Older versions of the architecture (2.02 and earlier) require the | |
65 | * masking of the top 16 bits. | |
66 | */ | |
3c726f8d BH |
67 | va &= ~(0xffffULL << 48); |
68 | ||
69 | switch (psize) { | |
70 | case MMU_PAGE_4K: | |
1f6aaacc AK |
71 | /* clear out bits after (52) [0....52.....63] */ |
72 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 73 | va |= ssize << 8; |
de640959 AK |
74 | sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) | |
75 | ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4); | |
76 | va |= sllp << 5; | |
a32e252f | 77 | asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) |
969391c5 | 78 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 79 | : "memory"); |
3c726f8d BH |
80 | break; |
81 | default: | |
5524a27d | 82 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 83 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 84 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 85 | va |= penc << 12; |
1189be65 | 86 | va |= ssize << 8; |
29ef7a3e AK |
87 | /* |
88 | * AVAL bits: | |
89 | * We don't need all the bits, but the rest of the bits | |
90 | * must be ignored by the processor. | |
91 | * vpn covers up to 65 bits of va (0...65) and we need | |
92 | * 58..64 bits of va. | |
93 | */ | |
94 | va |= (vpn & 0xfe); /* AVAL */ | |
60dbf438 | 95 | va |= 1; /* L */ |
a32e252f | 96 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) |
969391c5 | 97 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 98 | : "memory"); |
3c726f8d BH |
99 | break; |
100 | } | |
101 | } | |
102 | ||
b1022fbd | 103 | static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) |
3c726f8d | 104 | { |
5524a27d | 105 | unsigned long va; |
3c726f8d | 106 | unsigned int penc; |
de640959 | 107 | unsigned long sllp; |
3c726f8d | 108 | |
5524a27d AK |
109 | /* VPN_SHIFT can be at most 12 */
110 | va = vpn << VPN_SHIFT; | |
111 | /* | |
112 | * clear top 16 bits of 64 bit va, non SLS segment | |
113 | * Older versions of the architecture (2.02 and earlier) require the | |
114 | * masking of the top 16 bits. | |
115 | */ | |
3c726f8d BH |
116 | va &= ~(0xffffULL << 48); |
117 | ||
118 | switch (psize) { | |
119 | case MMU_PAGE_4K: | |
1f6aaacc AK |
120 | /* clear out bits after (52) [0....52.....63] */
121 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 122 | va |= ssize << 8; |
de640959 AK |
123 | sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) | |
124 | ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4); | |
125 | va |= sllp << 5; | |
3c726f8d BH |
126 | asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" |
127 | : : "r"(va) : "memory"); | |
128 | break; | |
129 | default: | |
5524a27d | 130 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 131 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 132 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 133 | va |= penc << 12; |
1189be65 | 134 | va |= ssize << 8; |
29ef7a3e AK |
135 | /* |
136 | * AVAL bits: | |
137 | * We don't need all the bits, but the rest of the bits | |
138 | * must be ignored by the processor. | |
139 | * vpn covers up to 65 bits of va (0...65) and we need | |
140 | * 58..64 bits of va. | |
141 | */ | |
142 | va |= (vpn & 0xfe); | |
60dbf438 | 143 | va |= 1; /* L */ |
3c726f8d BH |
144 | asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)" |
145 | : : "r"(va) : "memory"); | |
146 | break; | |
147 | } | |
148 | ||
149 | } | |
150 | ||
b1022fbd AK |
151 | static inline void tlbie(unsigned long vpn, int psize, int apsize, |
152 | int ssize, int local) | |
3c726f8d | 153 | { |
4c6d9acc | 154 | unsigned int use_local; |
44ae3ab3 | 155 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
3c726f8d | 156 | |
4c6d9acc IM |
157 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); |
158 | ||
3c726f8d BH |
159 | if (use_local) |
160 | use_local = mmu_psize_defs[psize].tlbiel; | |
161 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 162 | raw_spin_lock(&native_tlbie_lock); |
3c726f8d BH |
163 | asm volatile("ptesync": : :"memory"); |
164 | if (use_local) { | |
b1022fbd | 165 | __tlbiel(vpn, psize, apsize, ssize); |
3c726f8d BH |
166 | asm volatile("ptesync": : :"memory"); |
167 | } else { | |
b1022fbd | 168 | __tlbie(vpn, psize, apsize, ssize); |
3c726f8d BH |
169 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); |
170 | } | |
171 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 172 | raw_spin_unlock(&native_tlbie_lock); |
3c726f8d BH |
173 | } |
174 | ||
8e561e7e | 175 | static inline void native_lock_hpte(struct hash_pte *hptep) |
1da177e4 | 176 | { |
12f04f2b | 177 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 LT |
178 | |
179 | while (1) { | |
66d99b88 | 180 | if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) |
1da177e4 LT |
181 | break; |
182 | while(test_bit(HPTE_LOCK_BIT, word)) | |
183 | cpu_relax(); | |
184 | } | |
185 | } | |
186 | ||
8e561e7e | 187 | static inline void native_unlock_hpte(struct hash_pte *hptep) |
1da177e4 | 188 | { |
12f04f2b | 189 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 | 190 | |
66d99b88 | 191 | clear_bit_unlock(HPTE_LOCK_BIT, word); |
1da177e4 LT |
192 | } |
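Both helpers rely on the lock being just one bit inside the HPTE's first doubleword: locking is an atomic test-and-set with acquire semantics plus a relaxed busy-wait, and unlocking is a clear with release semantics. A self-contained C11 analogue of the same pattern (my sketch; the names and lock mask are hypothetical, and the real code of course uses the kernel bitops above):

```c
#include <stdatomic.h>

/* Hypothetical stand-in for an HPTE word with an embedded lock bit. */
#define LOCK_MASK (1UL << 3)

static void lock_word(_Atomic unsigned long *word)
{
	for (;;) {
		/* try to set the lock bit; acquire orders later accesses */
		unsigned long old = atomic_fetch_or_explicit(word, LOCK_MASK,
							     memory_order_acquire);
		if (!(old & LOCK_MASK))
			return;			/* we took the lock */
		/* busy-wait until the holder clears it, then retry */
		while (atomic_load_explicit(word, memory_order_relaxed) & LOCK_MASK)
			;			/* cpu_relax() equivalent */
	}
}

static void unlock_word(_Atomic unsigned long *word)
{
	/* release: clear only the lock bit, leave the rest of the word intact */
	atomic_fetch_and_explicit(word, ~LOCK_MASK, memory_order_release);
}
```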
193 | ||
5524a27d | 194 | static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, |
3c726f8d | 195 | unsigned long pa, unsigned long rflags, |
b1022fbd | 196 | unsigned long vflags, int psize, int apsize, int ssize) |
1da177e4 | 197 | { |
8e561e7e | 198 | struct hash_pte *hptep = htab_address + hpte_group; |
96e28449 | 199 | unsigned long hpte_v, hpte_r; |
1da177e4 LT |
200 | int i; |
201 | ||
3c726f8d | 202 | if (!(vflags & HPTE_V_BOLTED)) { |
5524a27d | 203 | DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," |
3c726f8d | 204 | " rflags=%lx, vflags=%lx, psize=%d)\n", |
5524a27d | 205 | hpte_group, vpn, pa, rflags, vflags, psize); |
3c726f8d BH |
206 | } |
207 | ||
1da177e4 | 208 | for (i = 0; i < HPTES_PER_GROUP; i++) { |
12f04f2b | 209 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { |
1da177e4 LT |
210 | /* retry with lock held */ |
211 | native_lock_hpte(hptep); | |
12f04f2b | 212 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) |
1da177e4 LT |
213 | break; |
214 | native_unlock_hpte(hptep); | |
215 | } | |
216 | ||
217 | hptep++; | |
218 | } | |
219 | ||
220 | if (i == HPTES_PER_GROUP) | |
221 | return -1; | |
222 | ||
b1022fbd AK |
223 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
224 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; | |
3c726f8d BH |
225 | |
226 | if (!(vflags & HPTE_V_BOLTED)) { | |
227 | DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n", | |
228 | i, hpte_v, hpte_r); | |
229 | } | |
1da177e4 | 230 | |
12f04f2b | 231 | hptep->r = cpu_to_be64(hpte_r); |
1da177e4 | 232 | /* Guarantee the second dword is visible before the valid bit */ |
74a0ba61 | 233 | eieio(); |
1da177e4 LT |
234 | /* |
235 | * Now set the first dword including the valid bit | |
236 | * NOTE: this also unlocks the hpte | |
237 | */ | |
12f04f2b | 238 | hptep->v = cpu_to_be64(hpte_v); |
1da177e4 LT |
239 | |
240 | __asm__ __volatile__ ("ptesync" : : : "memory"); | |
241 | ||
96e28449 | 242 | return i | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
243 | } |
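A full group makes native_hpte_insert() return -1 without sleeping, so it is up to the caller to fall back to the secondary bucket and, if both are full, evict something with hpte_remove before trying again. A simplified sketch of that caller-side pattern, assuming hash and vpn have already been computed for the page (the helper name is mine; the real hash-fault path does this with more bookkeeping):

```c
/*
 * Hypothetical caller sketch: try the primary group, then the secondary
 * group (flagged with HPTE_V_SECONDARY), then evict a random non-bolted
 * entry from the primary group and retry once.
 */
static long insert_with_retry(unsigned long hash, unsigned long vpn,
			      unsigned long pa, unsigned long rflags,
			      int psize, int apsize, int ssize)
{
	unsigned long group;
	long slot;

	/* primary bucket */
	group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = native_hpte_insert(group, vpn, pa, rflags, 0,
				  psize, apsize, ssize);
	if (slot != -1)
		return slot;

	/* secondary bucket */
	group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = native_hpte_insert(group, vpn, pa, rflags, HPTE_V_SECONDARY,
				  psize, apsize, ssize);
	if (slot != -1)
		return slot;

	/* both buckets full: make room in the primary group and retry */
	group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	native_hpte_remove(group);
	return native_hpte_insert(group, vpn, pa, rflags, 0,
				  psize, apsize, ssize);
}
```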
244 | ||
245 | static long native_hpte_remove(unsigned long hpte_group) | |
246 | { | |
8e561e7e | 247 | struct hash_pte *hptep; |
1da177e4 LT |
248 | int i; |
249 | int slot_offset; | |
96e28449 | 250 | unsigned long hpte_v; |
1da177e4 | 251 | |
3c726f8d BH |
252 | DBG_LOW(" remove(group=%lx)\n", hpte_group); |
253 | ||
1da177e4 LT |
254 | /* pick a random entry to start at */ |
255 | slot_offset = mftb() & 0x7; | |
256 | ||
257 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
258 | hptep = htab_address + hpte_group + slot_offset; | |
12f04f2b | 259 | hpte_v = be64_to_cpu(hptep->v); |
1da177e4 | 260 | |
96e28449 | 261 | if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { |
1da177e4 LT |
262 | /* retry with lock held */ |
263 | native_lock_hpte(hptep); | |
12f04f2b | 264 | hpte_v = be64_to_cpu(hptep->v); |
96e28449 DG |
265 | if ((hpte_v & HPTE_V_VALID) |
266 | && !(hpte_v & HPTE_V_BOLTED)) | |
1da177e4 LT |
267 | break; |
268 | native_unlock_hpte(hptep); | |
269 | } | |
270 | ||
271 | slot_offset++; | |
272 | slot_offset &= 0x7; | |
273 | } | |
274 | ||
275 | if (i == HPTES_PER_GROUP) | |
276 | return -1; | |
277 | ||
278 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
96e28449 | 279 | hptep->v = 0; |
1da177e4 LT |
280 | |
281 | return i; | |
282 | } | |
283 | ||
3c726f8d | 284 | static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, |
db3d8534 AK |
285 | unsigned long vpn, int bpsize, |
286 | int apsize, int ssize, int local) | |
1da177e4 | 287 | { |
8e561e7e | 288 | struct hash_pte *hptep = htab_address + slot; |
3c726f8d BH |
289 | unsigned long hpte_v, want_v; |
290 | int ret = 0; | |
291 | ||
db3d8534 | 292 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
3c726f8d | 293 | |
5524a27d AK |
294 | DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", |
295 | vpn, want_v & HPTE_V_AVPN, slot, newpp); | |
3c726f8d BH |
296 | |
297 | native_lock_hpte(hptep); | |
298 | ||
12f04f2b | 299 | hpte_v = be64_to_cpu(hptep->v); |
0608d692 AK |
300 | /* |
301 | * We need to invalidate the TLB always because hpte_remove doesn't do | |
302 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or | |
303 | * less random entry from it. When we do that we don't invalidate the TLB | |
304 | * (hpte_remove) because we assume the old translation is still | |
305 | * technically "valid". | |
306 | */ | |
db3d8534 | 307 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { |
3c726f8d | 308 | DBG_LOW(" -> miss\n"); |
3c726f8d BH |
309 | ret = -1; |
310 | } else { | |
311 | DBG_LOW(" -> hit\n"); | |
312 | /* Update the HPTE */ | |
12f04f2b AB |
313 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) | |
314 | (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C))); | |
3c726f8d | 315 | } |
3f1df7a2 | 316 | native_unlock_hpte(hptep); |
3c726f8d BH |
317 | |
318 | /* Ensure it is out of the tlb too. */ | |
db3d8534 | 319 | tlbie(vpn, bpsize, apsize, ssize, local); |
3c726f8d BH |
320 | |
321 | return ret; | |
1da177e4 LT |
322 | } |
323 | ||
5524a27d | 324 | static long native_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 325 | { |
8e561e7e | 326 | struct hash_pte *hptep; |
1da177e4 | 327 | unsigned long hash; |
1189be65 | 328 | unsigned long i; |
1da177e4 | 329 | long slot; |
3c726f8d | 330 | unsigned long want_v, hpte_v; |
1da177e4 | 331 | |
5524a27d | 332 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
74f227b2 | 333 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 334 | |
1189be65 PM |
335 | /* Bolted mappings are only ever in the primary group */ |
336 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
337 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
338 | hptep = htab_address + slot; | |
12f04f2b | 339 | hpte_v = be64_to_cpu(hptep->v); |
1da177e4 | 340 | |
1189be65 PM |
341 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) |
342 | /* HPTE matches */ | |
343 | return slot; | |
344 | ++slot; | |
1da177e4 LT |
345 | } |
346 | ||
347 | return -1; | |
348 | } | |
349 | ||
1da177e4 LT |
350 | /* |
351 | * Update the page protection bits. Intended to be used to create | |
352 | * guard pages for kernel data structures on pages which are bolted | |
353 | * in the HPT. Assumes pages being operated on will not be stolen. | |
1da177e4 LT |
354 | * |
355 | * No need to lock here because we should be the only user. | |
356 | */ | |
3c726f8d | 357 | static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, |
1189be65 | 358 | int psize, int ssize) |
1da177e4 | 359 | { |
5524a27d AK |
360 | unsigned long vpn; |
361 | unsigned long vsid; | |
1da177e4 | 362 | long slot; |
8e561e7e | 363 | struct hash_pte *hptep; |
1da177e4 | 364 | |
1189be65 | 365 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 366 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 367 | |
5524a27d | 368 | slot = native_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
369 | if (slot == -1) |
370 | panic("could not find page to bolt\n"); | |
371 | hptep = htab_address + slot; | |
372 | ||
3c726f8d | 373 | /* Update the HPTE */ |
12f04f2b AB |
374 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & |
375 | ~(HPTE_R_PP | HPTE_R_N)) | | |
376 | (newpp & (HPTE_R_PP | HPTE_R_N))); | |
db3d8534 AK |
377 | /* |
378 | * Ensure it is out of the tlb too. Bolted entries base and | |
379 | * actual page size will be the same. | |
380 | */ | |
381 | tlbie(vpn, psize, psize, ssize, 0); | |
1da177e4 LT |
382 | } |
383 | ||
5524a27d | 384 | static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 | 385 | int bpsize, int apsize, int ssize, int local) |
1da177e4 | 386 | { |
8e561e7e | 387 | struct hash_pte *hptep = htab_address + slot; |
96e28449 | 388 | unsigned long hpte_v; |
3c726f8d | 389 | unsigned long want_v; |
1da177e4 | 390 | unsigned long flags; |
1da177e4 LT |
391 | |
392 | local_irq_save(flags); | |
1da177e4 | 393 | |
5524a27d | 394 | DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); |
3c726f8d | 395 | |
db3d8534 | 396 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
3c726f8d | 397 | native_lock_hpte(hptep); |
12f04f2b | 398 | hpte_v = be64_to_cpu(hptep->v); |
1da177e4 | 399 | |
0608d692 AK |
400 | /* |
401 | * We need to invalidate the TLB always because hpte_remove doesn't do | |
402 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or | |
403 | * less random entry from it. When we do that we don't invalidate the TLB | |
404 | * (hpte_remove) because we assume the old translation is still | |
405 | * technically "valid". | |
406 | */ | |
db3d8534 | 407 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) |
1da177e4 | 408 | native_unlock_hpte(hptep); |
3c726f8d | 409 | else |
1da177e4 | 410 | /* Invalidate the hpte. NOTE: this also unlocks it */ |
96e28449 | 411 | hptep->v = 0; |
1da177e4 | 412 | |
3c726f8d | 413 | /* Invalidate the TLB */ |
db3d8534 AK |
414 | tlbie(vpn, bpsize, apsize, ssize, local); |
415 | ||
1da177e4 LT |
416 | local_irq_restore(flags); |
417 | } | |
418 | ||
fa1f8ae8 AK |
419 | static void native_hugepage_invalidate(unsigned long vsid, |
420 | unsigned long addr, | |
1a527286 | 421 | unsigned char *hpte_slot_array, |
fa1f8ae8 | 422 | int psize, int ssize) |
1a527286 | 423 | { |
969b7b20 | 424 | int i; |
1a527286 AK |
425 | struct hash_pte *hptep; |
426 | int actual_psize = MMU_PAGE_16M; | |
427 | unsigned int max_hpte_count, valid; | |
428 | unsigned long flags, s_addr = addr; | |
429 | unsigned long hpte_v, want_v, shift; | |
fa1f8ae8 | 430 | unsigned long hidx, vpn = 0, hash, slot; |
1a527286 AK |
431 | |
432 | shift = mmu_psize_defs[psize].shift; | |
433 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
434 | ||
435 | local_irq_save(flags); | |
436 | for (i = 0; i < max_hpte_count; i++) { | |
437 | valid = hpte_valid(hpte_slot_array, i); | |
438 | if (!valid) | |
439 | continue; | |
440 | hidx = hpte_hash_index(hpte_slot_array, i); | |
441 | ||
442 | /* get the vpn */ | |
443 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
444 | vpn = hpt_vpn(addr, vsid, ssize); |
445 | hash = hpt_hash(vpn, shift, ssize); | |
446 | if (hidx & _PTEIDX_SECONDARY) | |
447 | hash = ~hash; | |
448 | ||
449 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
450 | slot += hidx & _PTEIDX_GROUP_IX; | |
451 | ||
452 | hptep = htab_address + slot; | |
453 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
454 | native_lock_hpte(hptep); | |
12f04f2b | 455 | hpte_v = be64_to_cpu(hptep->v); |
1a527286 AK |
456 | |
457 | /* Even if we miss, we need to invalidate the TLB */ | |
458 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) | |
459 | native_unlock_hpte(hptep); | |
460 | else | |
461 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
462 | hptep->v = 0; | |
969b7b20 AK |
463 | /* |
464 | * We need to do a tlb invalidate for each address; the tlbie | |
465 | * instruction compares the entry VA in the TLB with the VA | |
466 | * specified here. | |
467 | */ | |
468 | tlbie(vpn, psize, actual_psize, ssize, 0); | |
1a527286 | 469 | } |
1a527286 AK |
470 | local_irq_restore(flags); |
471 | } | |
472 | ||
db3d8534 AK |
473 | static inline int __hpte_actual_psize(unsigned int lp, int psize) |
474 | { | |
475 | int i, shift; | |
476 | unsigned int mask; | |
477 | ||
478 | /* start from 1 ignoring MMU_PAGE_4K */ | |
479 | for (i = 1; i < MMU_PAGE_COUNT; i++) { | |
480 | ||
481 | /* invalid penc */ | |
482 | if (mmu_psize_defs[psize].penc[i] == -1) | |
483 | continue; | |
484 | /* | |
485 | * encoding bits per actual page size | |
486 | * PTE LP actual page size | |
487 | * rrrr rrrz >=8KB | |
488 | * rrrr rrzz >=16KB | |
489 | * rrrr rzzz >=32KB | |
490 | * rrrr zzzz >=64KB | |
491 | * ....... | |
492 | */ | |
493 | shift = mmu_psize_defs[i].shift - LP_SHIFT; | |
494 | if (shift > LP_BITS) | |
495 | shift = LP_BITS; | |
496 | mask = (1 << shift) - 1; | |
497 | if ((lp & mask) == mmu_psize_defs[psize].penc[i]) | |
498 | return i; | |
499 | } | |
500 | return -1; | |
501 | } | |
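The comment above is the key: the larger the actual page, the more low-order LP bits participate in the encoding, up to LP_BITS of them. For example, with LP_SHIFT = 12 and LP_BITS = 8, a 64K actual page (shift 16) is matched on only the low 4 LP bits, while a 16M page (shift 24) would use all 8. A toy version with invented penc values (illustration only; the real encodings live in mmu_psize_defs and come from firmware):

```c
/* Toy decode of the LP field with made-up page sizes and encodings. */
#define TOY_LP_SHIFT	12
#define TOY_LP_BITS	8

struct toy_psize { int shift; int penc; };	/* penc == -1: unsupported */

static int toy_actual_psize(unsigned int lp, const struct toy_psize *defs,
			    int count)
{
	for (int i = 1; i < count; i++) {	/* index 0 would be 4K */
		int shift;
		unsigned int mask;

		if (defs[i].penc == -1)
			continue;
		shift = defs[i].shift - TOY_LP_SHIFT;
		if (shift > TOY_LP_BITS)
			shift = TOY_LP_BITS;
		mask = (1u << shift) - 1;
		/* e.g. 64K page: shift 16 - 12 = 4, so compare lp & 0xf */
		if ((lp & mask) == (unsigned int)defs[i].penc)
			return i;		/* index of the actual page size */
	}
	return -1;
}
```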
502 | ||
8e561e7e | 503 | static void hpte_decode(struct hash_pte *hpte, unsigned long slot, |
b1022fbd | 504 | int *psize, int *apsize, int *ssize, unsigned long *vpn) |
71bf08b6 | 505 | { |
dcda287a | 506 | unsigned long avpn, pteg, vpi; |
12f04f2b AB |
507 | unsigned long hpte_v = be64_to_cpu(hpte->v); |
508 | unsigned long hpte_r = be64_to_cpu(hpte->r); | |
dcda287a | 509 | unsigned long vsid, seg_off; |
7e74c392 AK |
510 | int size, a_size, shift; |
511 | /* Look at the 8 bit LP value */ | |
12f04f2b | 512 | unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); |
71bf08b6 | 513 | |
b1022fbd AK |
514 | if (!(hpte_v & HPTE_V_LARGE)) { |
515 | size = MMU_PAGE_4K; | |
516 | a_size = MMU_PAGE_4K; | |
517 | } else { | |
71bf08b6 | 518 | for (size = 0; size < MMU_PAGE_COUNT; size++) { |
3c726f8d | 519 | |
71bf08b6 LB |
520 | /* valid entries have a shift value */ |
521 | if (!mmu_psize_defs[size].shift) | |
522 | continue; | |
b1022fbd | 523 | |
7e74c392 AK |
524 | a_size = __hpte_actual_psize(lp, size); |
525 | if (a_size != -1) | |
526 | break; | |
71bf08b6 LB |
527 | } |
528 | } | |
2454c7e9 | 529 | /* This works for all page sizes, and for 256M and 1T segments */ |
dcda287a | 530 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; |
71bf08b6 | 531 | shift = mmu_psize_defs[size].shift; |
71bf08b6 | 532 | |
dcda287a AK |
533 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); |
534 | pteg = slot / HPTES_PER_GROUP; | |
535 | if (hpte_v & HPTE_V_SECONDARY) | |
536 | pteg = ~pteg; | |
537 | ||
538 | switch (*ssize) { | |
539 | case MMU_SEGSIZE_256M: | |
540 | /* We only have 28 - 23 bits of seg_off in avpn */ | |
541 | seg_off = (avpn & 0x1f) << 23; | |
542 | vsid = avpn >> 5; | |
543 | /* We can find more bits from the pteg value */ | |
544 | if (shift < 23) { | |
545 | vpi = (vsid ^ pteg) & htab_hash_mask; | |
546 | seg_off |= vpi << shift; | |
547 | } | |
5524a27d | 548 | *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 549 | break; |
dcda287a AK |
550 | case MMU_SEGSIZE_1T: |
551 | /* We only have 40 - 23 bits of seg_off in avpn */ | |
552 | seg_off = (avpn & 0x1ffff) << 23; | |
553 | vsid = avpn >> 17; | |
554 | if (shift < 23) { | |
2454c7e9 | 555 | vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; |
dcda287a | 556 | seg_off |= vpi << shift; |
71bf08b6 | 557 | } |
5524a27d | 558 | *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 559 | break; |
dcda287a | 560 | default: |
5524a27d | 561 | *vpn = size = 0; |
3c726f8d | 562 | } |
b1022fbd AK |
563 | *psize = size; |
564 | *apsize = a_size; | |
3c726f8d BH |
565 | } |
566 | ||
f4c82d51 S |
567 | /* |
568 | * clear all mappings on kexec. All cpus are in real mode (or they will | |
569 | * be when they isi), and we are the only one left. We rely on our kernel | |
570 | * mapping being 0xC0's and the hardware ignoring those two real bits. | |
571 | * | |
572 | * TODO: add batching support when enabled. Remember, no dynamic memory here, | |
573 | * although there is the control page available... | |
574 | */ | |
575 | static void native_hpte_clear(void) | |
576 | { | |
5524a27d | 577 | unsigned long vpn = 0; |
f4c82d51 | 578 | unsigned long slot, slots, flags; |
8e561e7e | 579 | struct hash_pte *hptep = htab_address; |
5524a27d | 580 | unsigned long hpte_v; |
f4c82d51 | 581 | unsigned long pteg_count; |
b1022fbd | 582 | int psize, apsize, ssize; |
f4c82d51 S |
583 | |
584 | pteg_count = htab_hash_mask + 1; | |
585 | ||
586 | local_irq_save(flags); | |
587 | ||
588 | /* we take the tlbie lock and hold it. Some hardware will | |
589 | * deadlock if we try to tlbie from two processors at once. | |
590 | */ | |
6b9c9b8a | 591 | raw_spin_lock(&native_tlbie_lock); |
f4c82d51 S |
592 | |
593 | slots = pteg_count * HPTES_PER_GROUP; | |
594 | ||
595 | for (slot = 0; slot < slots; slot++, hptep++) { | |
596 | /* | |
597 | * we could lock the pte here, but we are the only cpu | |
598 | * running, right? and for crash dump, we probably | |
599 | * don't want to wait for a maybe bad cpu. | |
600 | */ | |
12f04f2b | 601 | hpte_v = be64_to_cpu(hptep->v); |
f4c82d51 | 602 | |
47f78a49 S |
603 | /* |
604 | * Call __tlbie() here rather than tlbie() since we | |
605 | * already hold the native_tlbie_lock. | |
606 | */ | |
96e28449 | 607 | if (hpte_v & HPTE_V_VALID) { |
b1022fbd | 608 | hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); |
96e28449 | 609 | hptep->v = 0; |
b1022fbd | 610 | __tlbie(vpn, psize, apsize, ssize); |
f4c82d51 S |
611 | } |
612 | } | |
613 | ||
47f78a49 | 614 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
6b9c9b8a | 615 | raw_spin_unlock(&native_tlbie_lock); |
f4c82d51 S |
616 | local_irq_restore(flags); |
617 | } | |
618 | ||
3c726f8d BH |
619 | /* |
620 | * Batched hash table flush; we batch the tlbie's to avoid taking/releasing | |
621 | * the lock all the time | |
622 | */ | |
61b1a942 | 623 | static void native_flush_hash_range(unsigned long number, int local) |
1da177e4 | 624 | { |
5524a27d AK |
625 | unsigned long vpn; |
626 | unsigned long hash, index, hidx, shift, slot; | |
8e561e7e | 627 | struct hash_pte *hptep; |
96e28449 | 628 | unsigned long hpte_v; |
3c726f8d BH |
629 | unsigned long want_v; |
630 | unsigned long flags; | |
631 | real_pte_t pte; | |
1da177e4 | 632 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
3c726f8d | 633 | unsigned long psize = batch->psize; |
1189be65 | 634 | int ssize = batch->ssize; |
3c726f8d | 635 | int i; |
1da177e4 LT |
636 | |
637 | local_irq_save(flags); | |
638 | ||
1da177e4 | 639 | for (i = 0; i < number; i++) { |
5524a27d | 640 | vpn = batch->vpn[i]; |
3c726f8d BH |
641 | pte = batch->pte[i]; |
642 | ||
5524a27d AK |
643 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
644 | hash = hpt_hash(vpn, shift, ssize); | |
3c726f8d BH |
645 | hidx = __rpte_to_hidx(pte, index); |
646 | if (hidx & _PTEIDX_SECONDARY) | |
647 | hash = ~hash; | |
648 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
649 | slot += hidx & _PTEIDX_GROUP_IX; | |
650 | hptep = htab_address + slot; | |
74f227b2 | 651 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
3c726f8d | 652 | native_lock_hpte(hptep); |
12f04f2b | 653 | hpte_v = be64_to_cpu(hptep->v); |
3c726f8d BH |
654 | if (!HPTE_V_COMPARE(hpte_v, want_v) || |
655 | !(hpte_v & HPTE_V_VALID)) | |
656 | native_unlock_hpte(hptep); | |
657 | else | |
658 | hptep->v = 0; | |
659 | } pte_iterate_hashed_end(); | |
1da177e4 LT |
660 | } |
661 | ||
44ae3ab3 | 662 | if (mmu_has_feature(MMU_FTR_TLBIEL) && |
3c726f8d | 663 | mmu_psize_defs[psize].tlbiel && local) { |
1da177e4 | 664 | asm volatile("ptesync":::"memory"); |
3c726f8d | 665 | for (i = 0; i < number; i++) { |
5524a27d | 666 | vpn = batch->vpn[i]; |
3c726f8d BH |
667 | pte = batch->pte[i]; |
668 | ||
5524a27d AK |
669 | pte_iterate_hashed_subpages(pte, psize, |
670 | vpn, index, shift) { | |
b1022fbd | 671 | __tlbiel(vpn, psize, psize, ssize); |
3c726f8d BH |
672 | } pte_iterate_hashed_end(); |
673 | } | |
1da177e4 LT |
674 | asm volatile("ptesync":::"memory"); |
675 | } else { | |
44ae3ab3 | 676 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
1da177e4 LT |
677 | |
678 | if (lock_tlbie) | |
6b9c9b8a | 679 | raw_spin_lock(&native_tlbie_lock); |
1da177e4 LT |
680 | |
681 | asm volatile("ptesync":::"memory"); | |
3c726f8d | 682 | for (i = 0; i < number; i++) { |
5524a27d | 683 | vpn = batch->vpn[i]; |
3c726f8d BH |
684 | pte = batch->pte[i]; |
685 | ||
5524a27d AK |
686 | pte_iterate_hashed_subpages(pte, psize, |
687 | vpn, index, shift) { | |
b1022fbd | 688 | __tlbie(vpn, psize, psize, ssize); |
3c726f8d BH |
689 | } pte_iterate_hashed_end(); |
690 | } | |
1da177e4 LT |
691 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
692 | ||
693 | if (lock_tlbie) | |
6b9c9b8a | 694 | raw_spin_unlock(&native_tlbie_lock); |
1da177e4 LT |
695 | } |
696 | ||
697 | local_irq_restore(flags); | |
698 | } | |
699 | ||
7d0daae4 | 700 | void __init hpte_init_native(void) |
1da177e4 LT |
701 | { |
702 | ppc_md.hpte_invalidate = native_hpte_invalidate; | |
703 | ppc_md.hpte_updatepp = native_hpte_updatepp; | |
704 | ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp; | |
705 | ppc_md.hpte_insert = native_hpte_insert; | |
f4c82d51 S |
706 | ppc_md.hpte_remove = native_hpte_remove; |
707 | ppc_md.hpte_clear_all = native_hpte_clear; | |
8e166991 | 708 | ppc_md.flush_hash_range = native_flush_hash_range; |
1a527286 | 709 | ppc_md.hugepage_invalidate = native_hugepage_invalidate; |
1da177e4 | 710 | } |
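hpte_init_native() does nothing beyond filling in the ppc_md machine-descriptor hooks; the generic hash MMU code always goes through ppc_md, so an LPAR platform can point the same hooks at hypervisor calls instead. A minimal illustration of a caller reaching the native routine through that indirection (the wrapper below is mine, not from this file):

```c
/*
 * Illustration only: generic hash MMU code invalidates an HPTE via the
 * machine descriptor rather than calling native_hpte_invalidate()
 * directly, so pSeries/LPAR can substitute hypervisor-backed routines.
 */
static void flush_one_hpte(unsigned long slot, unsigned long vpn,
			   int bpsize, int apsize, int ssize, int local)
{
	ppc_md.hpte_invalidate(slot, vpn, bpsize, apsize, ssize, local);
}
```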