Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
1da177e4 LT |
2 | /* |
3 | * native hashtable management. | |
4 | * | |
5 | * SMP scalability work: | |
6 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | |
1da177e4 | 7 | */ |
3c726f8d BH |
8 | |
9 | #undef DEBUG_LOW | |
10 | ||
1da177e4 LT |
11 | #include <linux/spinlock.h> |
12 | #include <linux/bitops.h> | |
beacc6da | 13 | #include <linux/of.h> |
4e287e65 | 14 | #include <linux/processor.h> |
1da177e4 LT |
15 | #include <linux/threads.h> |
16 | #include <linux/smp.h> | |
17 | ||
1da177e4 LT |
18 | #include <asm/machdep.h> |
19 | #include <asm/mmu.h> | |
20 | #include <asm/mmu_context.h> | |
21 | #include <asm/pgtable.h> | |
0428491c | 22 | #include <asm/trace.h> |
1da177e4 LT |
23 | #include <asm/tlb.h> |
24 | #include <asm/cputable.h> | |
3c726f8d | 25 | #include <asm/udbg.h> |
71bf08b6 | 26 | #include <asm/kexec.h> |
60dbf438 | 27 | #include <asm/ppc-opcode.h> |
2c86cd18 | 28 | #include <asm/feature-fixups.h> |
3c726f8d | 29 | |
ec249dd8 | 30 | #include <misc/cxl-base.h> |
4c6d9acc | 31 | |
3c726f8d BH |
32 | #ifdef DEBUG_LOW |
33 | #define DBG_LOW(fmt...) udbg_printf(fmt) | |
34 | #else | |
35 | #define DBG_LOW(fmt...) | |
36 | #endif | |
1da177e4 | 37 | |
12f04f2b | 38 | #ifdef __BIG_ENDIAN__ |
1da177e4 | 39 | #define HPTE_LOCK_BIT 3 |
12f04f2b AB |
40 | #else |
41 | #define HPTE_LOCK_BIT (56+3) | |
42 | #endif | |
1da177e4 | 43 | |
9e368f29 | 44 | DEFINE_RAW_SPINLOCK(native_tlbie_lock); |
1da177e4 | 45 | |
d4748276 NP |
46 | static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is) |
47 | { | |
48 | unsigned long rb; | |
49 | ||
50 | rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); | |
51 | ||
52 | asm volatile("tlbiel %0" : : "r" (rb)); | |
53 | } | |
54 | ||
55 | /* | |
56 | * tlbiel instruction for hash, set invalidation | |
57 | * i.e., r=1 and is=01 or is=10 or is=11 | |
58 | */ | |
59 | static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is, | |
60 | unsigned int pid, | |
61 | unsigned int ric, unsigned int prs) | |
62 | { | |
63 | unsigned long rb; | |
64 | unsigned long rs; | |
65 | unsigned int r = 0; /* hash format */ | |
66 | ||
67 | rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); | |
68 | rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); | |
69 | ||
70 | asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) | |
71 | : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r) | |
72 | : "memory"); | |
73 | } | |
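The two helpers above build the RB operand the same way: the congruence-class number ("set") and the invalidation scope ("is") are placed at IBM bit positions 51 and 53 of the register. A minimal user-space sketch of that packing, assuming the kernel's PPC_BITLSHIFT(be) expands to 63 - be on a 64-bit build:

```c
#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's PPC_BITLSHIFT(): turn an IBM (MSB = bit 0) bit
 * number into a left-shift amount for a 64-bit word. */
#define BITLSHIFT(be)	(63 - (be))

/* Assemble the RB operand the way tlbiel_hash_set_isa206/isa300 do:
 * "set" lands at IBM bit 51 (shift 12), "is" at IBM bit 53 (shift 10). */
static uint64_t tlbiel_rb(unsigned int set, unsigned int is)
{
	return ((uint64_t)set << BITLSHIFT(51)) |
	       ((uint64_t)is  << BITLSHIFT(53));
}

int main(void)
{
	/* set 0, is = 3 ("invalidate all entries"), as a global flush uses */
	printf("rb = 0x%016llx\n", (unsigned long long)tlbiel_rb(0, 3));
	return 0;
}
```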
74 | ||
75 | ||
76 | static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is) | |
77 | { | |
78 | unsigned int set; | |
79 | ||
80 | asm volatile("ptesync": : :"memory"); | |
81 | ||
82 | for (set = 0; set < num_sets; set++) | |
83 | tlbiel_hash_set_isa206(set, is); | |
84 | ||
85 | asm volatile("ptesync": : :"memory"); | |
86 | } | |
87 | ||
88 | static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) | |
89 | { | |
90 | unsigned int set; | |
91 | ||
92 | asm volatile("ptesync": : :"memory"); | |
93 | ||
94 | /* | |
95 | * Flush the first set of the TLB, and any caching of partition table | |
96 | * entries. Then flush the remaining sets of the TLB. Hash mode uses | |
97 | * partition scoped TLB translations. | |
98 | */ | |
99 | tlbiel_hash_set_isa300(0, is, 0, 2, 0); | |
100 | for (set = 1; set < num_sets; set++) | |
101 | tlbiel_hash_set_isa300(set, is, 0, 0, 0); | |
102 | ||
103 | /* | |
104 | * Now invalidate the process table cache. | |
105 | * | |
106 | * From ISA v3.0B p. 1078: | |
107 | * The following forms are invalid. | |
108 | * * PRS=1, R=0, and RIC!=2 (The only process-scoped | |
109 | * HPT caching is of the Process Table.) | |
110 | */ | |
111 | tlbiel_hash_set_isa300(0, is, 0, 2, 1); | |
112 | ||
113 | asm volatile("ptesync": : :"memory"); | |
bc276ecb NP |
114 | |
115 | asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory"); | |
d4748276 NP |
116 | } |
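For clarity, here is the order of operations tlbiel_all_isa300() issues, printed rather than executed. The RIC/PRS glosses in the comments are my reading of the ISA 3.0 tlbiel arguments (RIC=2 invalidates the TLB plus cached table entries, RIC=0 the TLB only; PRS selects process- versus partition-scoped entries), and num_sets is shortened to 4 for illustration where the real caller passes POWER9_TLB_SETS_HASH:

```c
#include <stdio.h>

/* Print the (set, ric, prs) sequence tlbiel_all_isa300() issues instead of
 * executing it; field meanings per the comment above are my reading of the
 * ISA, not taken from kernel headers. */
static void show_tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	printf("tlbiel set=0 is=%u ric=2 prs=0\n", is);	/* set 0 + partition table cache */
	for (set = 1; set < num_sets; set++)
		printf("tlbiel set=%u is=%u ric=0 prs=0\n", set, is);
	printf("tlbiel set=0 is=%u ric=2 prs=1\n", is);	/* process table cache */
}

int main(void)
{
	show_tlbiel_all_isa300(4, 3);	/* 3 = invalidate all entries */
	return 0;
}
```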
117 | ||
118 | void hash__tlbiel_all(unsigned int action) | |
119 | { | |
120 | unsigned int is; | |
121 | ||
122 | switch (action) { | |
123 | case TLB_INVAL_SCOPE_GLOBAL: | |
124 | is = 3; | |
125 | break; | |
126 | case TLB_INVAL_SCOPE_LPID: | |
127 | is = 2; | |
128 | break; | |
129 | default: | |
130 | BUG(); | |
131 | } | |
132 | ||
133 | if (early_cpu_has_feature(CPU_FTR_ARCH_300)) | |
134 | tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is); | |
135 | else if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) | |
136 | tlbiel_all_isa206(POWER8_TLB_SETS, is); | |
137 | else if (early_cpu_has_feature(CPU_FTR_ARCH_206)) | |
138 | tlbiel_all_isa206(POWER7_TLB_SETS, is); | |
139 | else | |
140 | WARN(1, "%s called on pre-POWER7 CPU\n", __func__); | |
d4748276 NP |
141 | } |
142 | ||
a3961f82 MS |
143 | static inline unsigned long ___tlbie(unsigned long vpn, int psize, |
144 | int apsize, int ssize) | |
3c726f8d | 145 | { |
5524a27d | 146 | unsigned long va; |
3c726f8d | 147 | unsigned int penc; |
de640959 | 148 | unsigned long sllp; |
3c726f8d | 149 | |
5524a27d AK |
150 | /* |
151 | * We need 14 to 65 bits of va for a tlbie of a 4K page |
152 | * With vpn we ignore the lower VPN_SHIFT bits already. | |
153 | * And top two bits are already ignored because we can | |
027dfac6 | 154 | * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT |
5524a27d AK |
155 | * of 12. |
156 | */ | |
157 | va = vpn << VPN_SHIFT; | |
158 | /* | |
159 | * clear top 16 bits of the 64-bit va, non-SLS segment |
160 | * Older versions of the architecture (2.02 and earlier) require the |
161 | * masking of the top 16 bits. | |
162 | */ | |
accfad7d AK |
163 | if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) |
164 | va &= ~(0xffffULL << 48); | |
3c726f8d BH |
165 | |
166 | switch (psize) { | |
167 | case MMU_PAGE_4K: | |
1f6aaacc AK |
168 | /* clear out bits after (52) [0....52.....63] */ |
169 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 170 | va |= ssize << 8; |
138ee7ee | 171 | sllp = get_sllp_encoding(apsize); |
de640959 | 172 | va |= sllp << 5; |
a32e252f | 173 | asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) |
969391c5 | 174 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 175 | : "memory"); |
3c726f8d BH |
176 | break; |
177 | default: | |
5524a27d | 178 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 179 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 180 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 181 | va |= penc << 12; |
1189be65 | 182 | va |= ssize << 8; |
29ef7a3e AK |
183 | /* |
184 | * AVAL bits: | |
185 | * We don't need all the bits, but the rest of the bits |
186 | * must be ignored by the processor. |
187 | * vpn covers up to 65 bits of va (0...65) and we need |
188 | * 58..64 bits of va. | |
189 | */ | |
190 | va |= (vpn & 0xfe); /* AVAL */ | |
60dbf438 | 191 | va |= 1; /* L */ |
a32e252f | 192 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) |
969391c5 | 193 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 194 | : "memory"); |
3c726f8d BH |
195 | break; |
196 | } | |
a3961f82 MS |
197 | return va; |
198 | } | |
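A standalone sketch of how ___tlbie() assembles the tlbie VA operand in the 4K-page case, with ssize and the SLB L||LP encoding (sllp) passed in directly instead of coming from mmu_psize_defs. The VPN_SHIFT value of 12 is my assumption about the kernel constant, the field positions simply mirror the code above, and the input values are made up:

```c
#include <stdio.h>
#include <stdint.h>

#define VPN_SHIFT 12	/* assumption: the hash MMU's VPN_SHIFT */

/* Rebuild the tlbie VA operand for a 4K page the way ___tlbie() does.
 * The top-16-bit crop is done unconditionally here; the kernel only does
 * it when MMU_FTR_TLBIE_CROP_VA is set. */
static uint64_t tlbie_va_4k(uint64_t vpn, unsigned int ssize, unsigned int sllp)
{
	uint64_t va = vpn << VPN_SHIFT;

	va &= ~(0xffffULL << 48);		/* crop for pre-2.03 CPUs */
	va &= ~((1ULL << (64 - 52)) - 1);	/* clear the low 12 bits */
	va |= (uint64_t)ssize << 8;		/* segment size selector */
	va |= (uint64_t)sllp << 5;		/* actual page size encoding */
	return va;
}

int main(void)
{
	/* made-up vpn, 1T segment (ssize 1), sllp 0 */
	printf("va = 0x%016llx\n",
	       (unsigned long long)tlbie_va_4k(0x123456789ULL, 1, 0));
	return 0;
}
```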
199 | ||
a5d4b589 AK |
200 | static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize) |
201 | { | |
202 | if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { | |
203 | /* Need the extra ptesync to ensure we don't reorder tlbie */ |
204 | asm volatile("ptesync": : :"memory"); | |
205 | ___tlbie(vpn, psize, apsize, ssize); | |
206 | } | |
207 | } | |
208 | ||
a3961f82 MS |
209 | static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) |
210 | { | |
211 | unsigned long rb; | |
212 | ||
213 | rb = ___tlbie(vpn, psize, apsize, ssize); | |
214 | trace_tlbie(0, 0, rb, 0, 0, 0, 0); | |
3c726f8d BH |
215 | } |
216 | ||
b1022fbd | 217 | static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) |
3c726f8d | 218 | { |
5524a27d | 219 | unsigned long va; |
3c726f8d | 220 | unsigned int penc; |
de640959 | 221 | unsigned long sllp; |
3c726f8d | 222 | |
5524a27d AK |
223 | /* VPN_SHIFT can be at most 12 */ |
224 | va = vpn << VPN_SHIFT; | |
225 | /* | |
226 | * clear top 16 bits of the 64-bit va, non-SLS segment |
227 | * Older versions of the architecture (2.02 and earlier) require the |
228 | * masking of the top 16 bits. | |
229 | */ | |
accfad7d AK |
230 | if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) |
231 | va &= ~(0xffffULL << 48); | |
3c726f8d BH |
232 | |
233 | switch (psize) { | |
234 | case MMU_PAGE_4K: | |
1f6aaacc AK |
235 | /* clear out bits after (52) [0....52.....63] */ |
236 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 237 | va |= ssize << 8; |
138ee7ee | 238 | sllp = get_sllp_encoding(apsize); |
de640959 | 239 | va |= sllp << 5; |
f923efbc BS |
240 | asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1) |
241 | : : "r" (va), "i" (CPU_FTR_ARCH_206) | |
242 | : "memory"); | |
3c726f8d BH |
243 | break; |
244 | default: | |
5524a27d | 245 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 246 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 247 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 248 | va |= penc << 12; |
1189be65 | 249 | va |= ssize << 8; |
29ef7a3e AK |
250 | /* |
251 | * AVAL bits: | |
252 | * We don't need all the bits, but the rest of the bits |
253 | * must be ignored by the processor. |
254 | * vpn covers up to 65 bits of va (0...65) and we need |
255 | * 58..64 bits of va. | |
256 | */ | |
257 | va |= (vpn & 0xfe); | |
60dbf438 | 258 | va |= 1; /* L */ |
f923efbc BS |
259 | asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1) |
260 | : : "r" (va), "i" (CPU_FTR_ARCH_206) | |
261 | : "memory"); | |
3c726f8d BH |
262 | break; |
263 | } | |
0428491c | 264 | trace_tlbie(0, 1, va, 0, 0, 0, 0); |
3c726f8d BH |
265 | |
266 | } | |
267 | ||
b1022fbd AK |
268 | static inline void tlbie(unsigned long vpn, int psize, int apsize, |
269 | int ssize, int local) | |
3c726f8d | 270 | { |
4c6d9acc | 271 | unsigned int use_local; |
44ae3ab3 | 272 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
3c726f8d | 273 | |
4c6d9acc IM |
274 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); |
275 | ||
3c726f8d BH |
276 | if (use_local) |
277 | use_local = mmu_psize_defs[psize].tlbiel; | |
278 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 279 | raw_spin_lock(&native_tlbie_lock); |
3c726f8d BH |
280 | asm volatile("ptesync": : :"memory"); |
281 | if (use_local) { | |
b1022fbd | 282 | __tlbiel(vpn, psize, apsize, ssize); |
3c726f8d BH |
283 | asm volatile("ptesync": : :"memory"); |
284 | } else { | |
b1022fbd | 285 | __tlbie(vpn, psize, apsize, ssize); |
a5d4b589 | 286 | fixup_tlbie(vpn, psize, apsize, ssize); |
3c726f8d BH |
287 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); |
288 | } | |
289 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 290 | raw_spin_unlock(&native_tlbie_lock); |
3c726f8d BH |
291 | } |
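The choice between a CPU-local tlbiel and a broadcast tlbie in tlbie() above comes down to a handful of conditions. A small sketch of just that predicate; the function and parameter names are mine, and the kernel derives the last three inputs from MMU_FTR_TLBIEL, mmu_psize_defs[] and cxl_ctx_in_use():

```c
#include <stdbool.h>
#include <stdio.h>

/* Decision logic mirrored from tlbie(): a local tlbiel is only used when the
 * caller asked for a local flush, the CPU and this page size support tlbiel,
 * and no cxl (CAPI) context could be caching translations. */
static bool use_local_tlbiel(bool caller_wants_local, bool cpu_has_tlbiel,
			     bool psize_has_tlbiel, bool cxl_in_use)
{
	return caller_wants_local && cpu_has_tlbiel &&
	       psize_has_tlbiel && !cxl_in_use;
}

int main(void)
{
	printf("%d\n", use_local_tlbiel(true, true, true, false));	/* 1 */
	printf("%d\n", use_local_tlbiel(true, true, true, true));	/* 0: cxl forces broadcast */
	return 0;
}
```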
292 | ||
8e561e7e | 293 | static inline void native_lock_hpte(struct hash_pte *hptep) |
1da177e4 | 294 | { |
12f04f2b | 295 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 LT |
296 | |
297 | while (1) { | |
66d99b88 | 298 | if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) |
1da177e4 | 299 | break; |
4e287e65 | 300 | spin_begin(); |
1da177e4 | 301 | while(test_bit(HPTE_LOCK_BIT, word)) |
4e287e65 NP |
302 | spin_cpu_relax(); |
303 | spin_end(); | |
1da177e4 LT |
304 | } |
305 | } | |
306 | ||
8e561e7e | 307 | static inline void native_unlock_hpte(struct hash_pte *hptep) |
1da177e4 | 308 | { |
12f04f2b | 309 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 | 310 | |
66d99b88 | 311 | clear_bit_unlock(HPTE_LOCK_BIT, word); |
1da177e4 LT |
312 | } |
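Why HPTE_LOCK_BIT differs between big- and little-endian builds: the HPTE's first doubleword is stored big-endian in memory, while test_and_set_bit_lock()/clear_bit_unlock() operate on a CPU-native unsigned long, so the same physical bit is index 3 on BE but 56+3 on LE. A small demonstration of that renumbering in plain C, with no kernel headers involved:

```c
#include <stdio.h>
#include <stdint.h>

/* Store a value big-endian, reload it as a little-endian native long, and
 * see where the "bit 3" of the big-endian view ends up. */
int main(void)
{
	uint64_t be_value = 1ULL << 3;	/* lock bit set in the big-endian view */
	uint8_t mem[8];
	uint64_t le_native = 0;
	int i;

	for (i = 0; i < 8; i++)		/* store big-endian: MSB first */
		mem[i] = (uint8_t)(be_value >> (56 - 8 * i));

	for (i = 0; i < 8; i++)		/* reload as a little-endian native long */
		le_native |= (uint64_t)mem[i] << (8 * i);

	printf("bit index seen by a LE CPU: %d\n", __builtin_ctzll(le_native));
	return 0;	/* prints 59, i.e. 56+3 */
}
```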
313 | ||
5524a27d | 314 | static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, |
3c726f8d | 315 | unsigned long pa, unsigned long rflags, |
b1022fbd | 316 | unsigned long vflags, int psize, int apsize, int ssize) |
1da177e4 | 317 | { |
8e561e7e | 318 | struct hash_pte *hptep = htab_address + hpte_group; |
96e28449 | 319 | unsigned long hpte_v, hpte_r; |
1da177e4 LT |
320 | int i; |
321 | ||
3c726f8d | 322 | if (!(vflags & HPTE_V_BOLTED)) { |
5524a27d | 323 | DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," |
3c726f8d | 324 | " rflags=%lx, vflags=%lx, psize=%d)\n", |
5524a27d | 325 | hpte_group, vpn, pa, rflags, vflags, psize); |
3c726f8d BH |
326 | } |
327 | ||
1da177e4 | 328 | for (i = 0; i < HPTES_PER_GROUP; i++) { |
12f04f2b | 329 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { |
1da177e4 LT |
330 | /* retry with lock held */ |
331 | native_lock_hpte(hptep); | |
12f04f2b | 332 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) |
1da177e4 LT |
333 | break; |
334 | native_unlock_hpte(hptep); | |
335 | } | |
336 | ||
337 | hptep++; | |
338 | } | |
339 | ||
340 | if (i == HPTES_PER_GROUP) | |
341 | return -1; | |
342 | ||
b1022fbd | 343 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
6b243fcf | 344 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; |
3c726f8d BH |
345 | |
346 | if (!(vflags & HPTE_V_BOLTED)) { | |
347 | DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n", | |
348 | i, hpte_v, hpte_r); | |
349 | } | |
1da177e4 | 350 | |
6b243fcf PM |
351 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
352 | hpte_r = hpte_old_to_new_r(hpte_v, hpte_r); | |
353 | hpte_v = hpte_old_to_new_v(hpte_v); | |
354 | } | |
355 | ||
12f04f2b | 356 | hptep->r = cpu_to_be64(hpte_r); |
1da177e4 | 357 | /* Guarantee the second dword is visible before the valid bit */ |
74a0ba61 | 358 | eieio(); |
1da177e4 LT |
359 | /* |
360 | * Now set the first dword including the valid bit | |
361 | * NOTE: this also unlocks the hpte | |
362 | */ | |
12f04f2b | 363 | hptep->v = cpu_to_be64(hpte_v); |
1da177e4 LT |
364 | |
365 | __asm__ __volatile__ ("ptesync" : : : "memory"); | |
366 | ||
96e28449 | 367 | return i | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
368 | } |
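The value returned by native_hpte_insert() packs the slot index within the 8-entry group into the low three bits and flags the secondary bucket in bit 3. A sketch of how a caller would split it back apart; the 0x7 mask and bit 3 correspond to what the hash code calls _PTEIDX_GROUP_IX and _PTEIDX_SECONDARY:

```c
#include <stdio.h>

/* Decode the slot cookie returned by native_hpte_insert():
 * (slot within the 8-entry group) | (secondary-hash flag << 3). */
static void decode_slot(long ret)
{
	int group_ix = ret & 0x7;
	int secondary = (ret >> 3) & 1;

	printf("slot %d in the %s bucket\n",
	       group_ix, secondary ? "secondary" : "primary");
}

int main(void)
{
	decode_slot(5);			/* slot 5, primary bucket */
	decode_slot(5 | (1 << 3));	/* slot 5, secondary bucket */
	return 0;
}
```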
369 | ||
370 | static long native_hpte_remove(unsigned long hpte_group) | |
371 | { | |
8e561e7e | 372 | struct hash_pte *hptep; |
1da177e4 LT |
373 | int i; |
374 | int slot_offset; | |
96e28449 | 375 | unsigned long hpte_v; |
1da177e4 | 376 | |
3c726f8d BH |
377 | DBG_LOW(" remove(group=%lx)\n", hpte_group); |
378 | ||
1da177e4 LT |
379 | /* pick a random entry to start at */ |
380 | slot_offset = mftb() & 0x7; | |
381 | ||
382 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
383 | hptep = htab_address + hpte_group + slot_offset; | |
12f04f2b | 384 | hpte_v = be64_to_cpu(hptep->v); |
1da177e4 | 385 | |
96e28449 | 386 | if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { |
1da177e4 LT |
387 | /* retry with lock held */ |
388 | native_lock_hpte(hptep); | |
12f04f2b | 389 | hpte_v = be64_to_cpu(hptep->v); |
96e28449 DG |
390 | if ((hpte_v & HPTE_V_VALID) |
391 | && !(hpte_v & HPTE_V_BOLTED)) | |
1da177e4 LT |
392 | break; |
393 | native_unlock_hpte(hptep); | |
394 | } | |
395 | ||
396 | slot_offset++; | |
397 | slot_offset &= 0x7; | |
398 | } | |
399 | ||
400 | if (i == HPTES_PER_GROUP) | |
401 | return -1; | |
402 | ||
403 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
96e28449 | 404 | hptep->v = 0; |
1da177e4 LT |
405 | |
406 | return i; | |
407 | } | |
408 | ||
3c726f8d | 409 | static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, |
db3d8534 | 410 | unsigned long vpn, int bpsize, |
aefa5688 | 411 | int apsize, int ssize, unsigned long flags) |
1da177e4 | 412 | { |
8e561e7e | 413 | struct hash_pte *hptep = htab_address + slot; |
3c726f8d | 414 | unsigned long hpte_v, want_v; |
aefa5688 | 415 | int ret = 0, local = 0; |
3c726f8d | 416 | |
db3d8534 | 417 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
3c726f8d | 418 | |
5524a27d AK |
419 | DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", |
420 | vpn, want_v & HPTE_V_AVPN, slot, newpp); | |
3c726f8d | 421 | |
a833280b | 422 | hpte_v = hpte_get_old_v(hptep); |
0608d692 AK |
423 | /* |
424 | * We always need to invalidate the TLB because hpte_remove doesn't do |
425 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or less |
426 | * random entry from it. When we do that we don't invalidate the TLB | |
427 | * (hpte_remove) because we assume the old translation is still | |
428 | * technically "valid". | |
429 | */ | |
db3d8534 | 430 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { |
3c726f8d | 431 | DBG_LOW(" -> miss\n"); |
3c726f8d BH |
432 | ret = -1; |
433 | } else { | |
0ec2698f AK |
434 | native_lock_hpte(hptep); |
435 | /* recheck with locks held */ | |
a833280b | 436 | hpte_v = hpte_get_old_v(hptep); |
0ec2698f AK |
437 | if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) || |
438 | !(hpte_v & HPTE_V_VALID))) { | |
439 | ret = -1; | |
440 | } else { | |
441 | DBG_LOW(" -> hit\n"); | |
442 | /* Update the HPTE */ | |
443 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & | |
8550e2fa AK |
444 | ~(HPTE_R_PPP | HPTE_R_N)) | |
445 | (newpp & (HPTE_R_PPP | HPTE_R_N | | |
0ec2698f AK |
446 | HPTE_R_C))); |
447 | } | |
448 | native_unlock_hpte(hptep); | |
3c726f8d | 449 | } |
aefa5688 AK |
450 | |
451 | if (flags & HPTE_LOCAL_UPDATE) | |
452 | local = 1; | |
453 | /* | |
454 | * Ensure it is out of the tlb too if it is not a nohpte fault | |
455 | */ | |
456 | if (!(flags & HPTE_NOHPTE_UPDATE)) | |
457 | tlbie(vpn, bpsize, apsize, ssize, local); | |
458 | ||
3c726f8d | 459 | return ret; |
1da177e4 LT |
460 | } |
461 | ||
5524a27d | 462 | static long native_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 463 | { |
8e561e7e | 464 | struct hash_pte *hptep; |
1da177e4 | 465 | unsigned long hash; |
1189be65 | 466 | unsigned long i; |
1da177e4 | 467 | long slot; |
3c726f8d | 468 | unsigned long want_v, hpte_v; |
1da177e4 | 469 | |
5524a27d | 470 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
74f227b2 | 471 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 472 | |
1189be65 PM |
473 | /* Bolted mappings are only ever in the primary group */ |
474 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
475 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
1da177e4 | 476 | |
a833280b AK |
477 | hptep = htab_address + slot; |
478 | hpte_v = hpte_get_old_v(hptep); | |
1189be65 PM |
479 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) |
480 | /* HPTE matches */ | |
481 | return slot; | |
482 | ++slot; | |
1da177e4 LT |
483 | } |
484 | ||
485 | return -1; | |
486 | } | |
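native_hpte_find() only ever scans the primary bucket because bolted kernel mappings are inserted there. The bucket's first slot falls out of simple arithmetic on the hash; a toy version with a made-up htab_hash_mask, since the real mask is derived from the hash-table size at boot:

```c
#include <stdio.h>

/* Primary-bucket arithmetic as used by native_hpte_find(): the masked hash
 * selects a PTE group, and each group holds eight HPTEs. */
#define HPTES_PER_GROUP 8

int main(void)
{
	unsigned long hash = 0x0089abcdUL;		/* made-up hash value */
	unsigned long htab_hash_mask = (1UL << 17) - 1;	/* hypothetical mask */
	unsigned long slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;

	printf("primary group starts at slot %lu\n", slot);
	return 0;
}
```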
487 | ||
1da177e4 LT |
488 | /* |
489 | * Update the page protection bits. Intended to be used to create | |
490 | * guard pages for kernel data structures on pages which are bolted | |
491 | * in the HPT. Assumes pages being operated on will not be stolen. | |
1da177e4 LT |
492 | * |
493 | * No need to lock here because we should be the only user. | |
494 | */ | |
3c726f8d | 495 | static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, |
1189be65 | 496 | int psize, int ssize) |
1da177e4 | 497 | { |
5524a27d AK |
498 | unsigned long vpn; |
499 | unsigned long vsid; | |
1da177e4 | 500 | long slot; |
8e561e7e | 501 | struct hash_pte *hptep; |
1da177e4 | 502 | |
1189be65 | 503 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 504 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 505 | |
5524a27d | 506 | slot = native_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
507 | if (slot == -1) |
508 | panic("could not find page to bolt\n"); | |
509 | hptep = htab_address + slot; | |
510 | ||
3c726f8d | 511 | /* Update the HPTE */ |
12f04f2b | 512 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & |
8550e2fa AK |
513 | ~(HPTE_R_PPP | HPTE_R_N)) | |
514 | (newpp & (HPTE_R_PPP | HPTE_R_N))); | |
db3d8534 AK |
515 | /* |
516 | * Ensure it is out of the tlb too. A bolted entry's base and |
517 | * actual page size will be the same. |
518 | */ | |
519 | tlbie(vpn, psize, psize, ssize, 0); | |
1da177e4 LT |
520 | } |
521 | ||
1b644f57 AB |
522 | /* |
523 | * Remove a bolted kernel entry. Memory hotplug uses this. | |
524 | * | |
525 | * No need to lock here because we should be the only user. | |
526 | */ | |
527 | static int native_hpte_removebolted(unsigned long ea, int psize, int ssize) | |
528 | { | |
529 | unsigned long vpn; | |
530 | unsigned long vsid; | |
531 | long slot; | |
532 | struct hash_pte *hptep; | |
533 | ||
534 | vsid = get_kernel_vsid(ea, ssize); | |
535 | vpn = hpt_vpn(ea, vsid, ssize); | |
536 | ||
537 | slot = native_hpte_find(vpn, psize, ssize); | |
538 | if (slot == -1) | |
539 | return -ENOENT; | |
540 | ||
541 | hptep = htab_address + slot; | |
542 | ||
543 | VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED)); | |
544 | ||
545 | /* Invalidate the hpte */ | |
546 | hptep->v = 0; | |
547 | ||
548 | /* Invalidate the TLB */ | |
549 | tlbie(vpn, psize, psize, ssize, 0); | |
550 | return 0; | |
551 | } | |
552 | ||
553 | ||
5524a27d | 554 | static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 | 555 | int bpsize, int apsize, int ssize, int local) |
1da177e4 | 556 | { |
8e561e7e | 557 | struct hash_pte *hptep = htab_address + slot; |
96e28449 | 558 | unsigned long hpte_v; |
3c726f8d | 559 | unsigned long want_v; |
1da177e4 | 560 | unsigned long flags; |
1da177e4 LT |
561 | |
562 | local_irq_save(flags); | |
1da177e4 | 563 | |
5524a27d | 564 | DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); |
3c726f8d | 565 | |
db3d8534 | 566 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
a833280b | 567 | hpte_v = hpte_get_old_v(hptep); |
1da177e4 | 568 | |
27d8959d AK |
569 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { |
570 | native_lock_hpte(hptep); | |
571 | /* recheck with locks held */ | |
572 | hpte_v = hpte_get_old_v(hptep); | |
573 | ||
574 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) | |
575 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
576 | hptep->v = 0; | |
577 | else | |
578 | native_unlock_hpte(hptep); | |
579 | } | |
0608d692 AK |
580 | /* |
581 | * We always need to invalidate the TLB because hpte_remove doesn't do |
582 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or less |
583 | * random entry from it. When we do that we don't invalidate the TLB | |
584 | * (hpte_remove) because we assume the old translation is still | |
585 | * technically "valid". | |
586 | */ | |
db3d8534 AK |
587 | tlbie(vpn, bpsize, apsize, ssize, local); |
588 | ||
1da177e4 LT |
589 | local_irq_restore(flags); |
590 | } | |
591 | ||
e34aa03c | 592 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
fa1f8ae8 AK |
593 | static void native_hugepage_invalidate(unsigned long vsid, |
594 | unsigned long addr, | |
1a527286 | 595 | unsigned char *hpte_slot_array, |
d557b098 | 596 | int psize, int ssize, int local) |
1a527286 | 597 | { |
969b7b20 | 598 | int i; |
1a527286 AK |
599 | struct hash_pte *hptep; |
600 | int actual_psize = MMU_PAGE_16M; | |
601 | unsigned int max_hpte_count, valid; | |
602 | unsigned long flags, s_addr = addr; | |
603 | unsigned long hpte_v, want_v, shift; | |
fa1f8ae8 | 604 | unsigned long hidx, vpn = 0, hash, slot; |
1a527286 AK |
605 | |
606 | shift = mmu_psize_defs[psize].shift; | |
607 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
608 | ||
609 | local_irq_save(flags); | |
610 | for (i = 0; i < max_hpte_count; i++) { | |
611 | valid = hpte_valid(hpte_slot_array, i); | |
612 | if (!valid) | |
613 | continue; | |
614 | hidx = hpte_hash_index(hpte_slot_array, i); | |
615 | ||
616 | /* get the vpn */ | |
617 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
618 | vpn = hpt_vpn(addr, vsid, ssize); |
619 | hash = hpt_hash(vpn, shift, ssize); | |
620 | if (hidx & _PTEIDX_SECONDARY) | |
621 | hash = ~hash; | |
622 | ||
623 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
624 | slot += hidx & _PTEIDX_GROUP_IX; | |
625 | ||
626 | hptep = htab_address + slot; | |
627 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
a833280b | 628 | hpte_v = hpte_get_old_v(hptep); |
1a527286 AK |
629 | |
630 | /* Even if we miss, we need to invalidate the TLB */ | |
27d8959d AK |
631 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { |
632 | /* recheck with locks held */ | |
633 | native_lock_hpte(hptep); | |
634 | hpte_v = hpte_get_old_v(hptep); | |
635 | ||
636 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { | |
637 | /* | |
638 | * Invalidate the hpte. NOTE: this also unlocks it | |
639 | */ | |
640 | ||
641 | hptep->v = 0; | |
642 | } else | |
643 | native_unlock_hpte(hptep); | |
644 | } | |
969b7b20 AK |
645 | /* |
646 | * We need to do a tlb invalidate for each address; the tlbie |
647 | * instruction compares the entry's VA in the TLB with the VA |
648 | * specified here. |
649 | */ | |
d557b098 | 650 | tlbie(vpn, psize, actual_psize, ssize, local); |
1a527286 | 651 | } |
1a527286 AK |
652 | local_irq_restore(flags); |
653 | } | |
e34aa03c AK |
654 | #else |
655 | static void native_hugepage_invalidate(unsigned long vsid, | |
656 | unsigned long addr, | |
657 | unsigned char *hpte_slot_array, | |
658 | int psize, int ssize, int local) | |
659 | { | |
660 | WARN(1, "%s called without THP support\n", __func__); | |
661 | } | |
662 | #endif | |
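The loop bound in native_hugepage_invalidate() comes from how many base pages back one PMD-mapped huge page: max_hpte_count = 1 << (PMD_SHIFT - shift). A toy calculation under assumed values (PMD_SHIFT of 24 for a 16M huge page, 64K base pages); these are illustrative, not read from any particular kernel config:

```c
#include <stdio.h>

/* One HPTE slot is tracked per base page backing the huge page, so the
 * slot array length is the ratio of the two page sizes. */
int main(void)
{
	unsigned int pmd_shift = 24;		/* assumption: 16M huge page */
	unsigned int base_page_shift = 16;	/* assumption: 64K base pages */
	unsigned int max_hpte_count = 1U << (pmd_shift - base_page_shift);

	printf("HPTE slots tracked per huge page: %u\n", max_hpte_count);
	return 0;
}
```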
1a527286 | 663 | |
8e561e7e | 664 | static void hpte_decode(struct hash_pte *hpte, unsigned long slot, |
b1022fbd | 665 | int *psize, int *apsize, int *ssize, unsigned long *vpn) |
71bf08b6 | 666 | { |
dcda287a | 667 | unsigned long avpn, pteg, vpi; |
12f04f2b AB |
668 | unsigned long hpte_v = be64_to_cpu(hpte->v); |
669 | unsigned long hpte_r = be64_to_cpu(hpte->r); | |
dcda287a | 670 | unsigned long vsid, seg_off; |
7e74c392 AK |
671 | int size, a_size, shift; |
672 | /* Look at the 8 bit LP value */ | |
12f04f2b | 673 | unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); |
71bf08b6 | 674 | |
6b243fcf PM |
675 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
676 | hpte_v = hpte_new_to_old_v(hpte_v, hpte_r); | |
677 | hpte_r = hpte_new_to_old_r(hpte_r); | |
678 | } | |
b1022fbd AK |
679 | if (!(hpte_v & HPTE_V_LARGE)) { |
680 | size = MMU_PAGE_4K; | |
681 | a_size = MMU_PAGE_4K; | |
682 | } else { | |
0eeede0c PM |
683 | size = hpte_page_sizes[lp] & 0xf; |
684 | a_size = hpte_page_sizes[lp] >> 4; | |
71bf08b6 | 685 | } |
2454c7e9 | 686 | /* This works for all page sizes, and for 256M and 1T segments */ |
6b243fcf | 687 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; |
71bf08b6 | 688 | shift = mmu_psize_defs[size].shift; |
71bf08b6 | 689 | |
dcda287a AK |
690 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); |
691 | pteg = slot / HPTES_PER_GROUP; | |
692 | if (hpte_v & HPTE_V_SECONDARY) | |
693 | pteg = ~pteg; | |
694 | ||
695 | switch (*ssize) { | |
696 | case MMU_SEGSIZE_256M: | |
697 | /* We only have 28 - 23 bits of seg_off in avpn */ | |
698 | seg_off = (avpn & 0x1f) << 23; | |
699 | vsid = avpn >> 5; | |
700 | /* We can find more bits from the pteg value */ | |
701 | if (shift < 23) { | |
702 | vpi = (vsid ^ pteg) & htab_hash_mask; | |
703 | seg_off |= vpi << shift; | |
704 | } | |
5524a27d | 705 | *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 706 | break; |
dcda287a AK |
707 | case MMU_SEGSIZE_1T: |
708 | /* We only have 40 - 23 bits of seg_off in avpn */ | |
709 | seg_off = (avpn & 0x1ffff) << 23; | |
710 | vsid = avpn >> 17; | |
711 | if (shift < 23) { | |
2454c7e9 | 712 | vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; |
dcda287a | 713 | seg_off |= vpi << shift; |
71bf08b6 | 714 | } |
5524a27d | 715 | *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 716 | break; |
dcda287a | 717 | default: |
5524a27d | 718 | *vpn = size = 0; |
3c726f8d | 719 | } |
b1022fbd AK |
720 | *psize = size; |
721 | *apsize = a_size; | |
3c726f8d BH |
722 | } |
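The part of hpte_decode() that recovers the low bits of the segment offset relies on the primary hash for a 256M segment being an XOR of the VSID and the page index, so knowing the VSID (from the AVPN) and the PTE group the entry landed in gives the missing bits back. A small demonstration with made-up numbers and an illustrative hash mask:

```c
#include <stdio.h>

/* Hash forward as insertion would, then recover the page index by XOR the
 * way hpte_decode() does; only the bits under the mask can be recovered. */
int main(void)
{
	unsigned long mask = (1UL << 17) - 1;	/* hypothetical htab_hash_mask */
	unsigned long vsid = 0x1a2b3c;		/* made-up VSID */
	unsigned long vpi = 0x01234;		/* page index inside the segment */

	unsigned long pteg = (vsid ^ vpi) & mask;	/* group insertion hashed to */
	unsigned long recovered = (vsid ^ pteg) & mask;	/* what decode gets back */

	printf("vpi=0x%lx recovered=0x%lx\n", vpi & mask, recovered);
	return 0;
}
```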
723 | ||
f4c82d51 S |
724 | /* |
725 | * clear all mappings on kexec. All cpus are in real mode (or they will | |
726 | * be when they isi), and we are the only one left. We rely on our kernel | |
727 | * mapping being 0xC0's and the hardware ignoring those two real bits. | |
728 | * | |
fdf880a6 CB |
729 | * This must be called with interrupts disabled. |
730 | * | |
731 | * Taking the native_tlbie_lock is unsafe here due to the possibility of | |
732 | * lockdep being on. On pre-POWER5 hardware, not taking the lock could |
733 | * cause deadlock. On POWER5 and newer, not taking the lock is fine. This only |
734 | * gets called during boot before secondary CPUs have come up and during | |
735 | * crashdump and all bets are off anyway. | |
736 | * | |
f4c82d51 | 737 | * TODO: add batching support when enabled. remember, no dynamic memory here, |
027dfac6 | 738 | * although there is the control page available... |
f4c82d51 S |
739 | */ |
740 | static void native_hpte_clear(void) | |
741 | { | |
5524a27d | 742 | unsigned long vpn = 0; |
fdf880a6 | 743 | unsigned long slot, slots; |
8e561e7e | 744 | struct hash_pte *hptep = htab_address; |
5524a27d | 745 | unsigned long hpte_v; |
f4c82d51 | 746 | unsigned long pteg_count; |
b1022fbd | 747 | int psize, apsize, ssize; |
f4c82d51 S |
748 | |
749 | pteg_count = htab_hash_mask + 1; | |
750 | ||
f4c82d51 S |
751 | slots = pteg_count * HPTES_PER_GROUP; |
752 | ||
753 | for (slot = 0; slot < slots; slot++, hptep++) { | |
754 | /* | |
755 | * we could lock the pte here, but we are the only cpu | |
756 | * running, right? and for crash dump, we probably | |
757 | * don't want to wait for a maybe bad cpu. | |
758 | */ | |
12f04f2b | 759 | hpte_v = be64_to_cpu(hptep->v); |
f4c82d51 | 760 | |
47f78a49 | 761 | /* |
fdf880a6 CB |
762 | * Call ___tlbie() here rather than tlbie() since we can't take the |
763 | * native_tlbie_lock. | |
47f78a49 | 764 | */ |
96e28449 | 765 | if (hpte_v & HPTE_V_VALID) { |
b1022fbd | 766 | hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); |
96e28449 | 767 | hptep->v = 0; |
a3961f82 | 768 | ___tlbie(vpn, psize, apsize, ssize); |
f4c82d51 S |
769 | } |
770 | } | |
771 | ||
47f78a49 | 772 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
f4c82d51 S |
773 | } |
774 | ||
3c726f8d BH |
775 | /* |
776 | * Batched hash table flush, we batch the tlbie's to avoid taking/releasing | |
777 | * the lock all the time | |
778 | */ | |
61b1a942 | 779 | static void native_flush_hash_range(unsigned long number, int local) |
1da177e4 | 780 | { |
a5d4b589 | 781 | unsigned long vpn = 0; |
5524a27d | 782 | unsigned long hash, index, hidx, shift, slot; |
8e561e7e | 783 | struct hash_pte *hptep; |
96e28449 | 784 | unsigned long hpte_v; |
3c726f8d BH |
785 | unsigned long want_v; |
786 | unsigned long flags; | |
787 | real_pte_t pte; | |
69111bac | 788 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
3c726f8d | 789 | unsigned long psize = batch->psize; |
1189be65 | 790 | int ssize = batch->ssize; |
3c726f8d | 791 | int i; |
88b1bf72 FB |
792 | unsigned int use_local; |
793 | ||
794 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && | |
795 | mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); | |
1da177e4 LT |
796 | |
797 | local_irq_save(flags); | |
798 | ||
1da177e4 | 799 | for (i = 0; i < number; i++) { |
5524a27d | 800 | vpn = batch->vpn[i]; |
3c726f8d BH |
801 | pte = batch->pte[i]; |
802 | ||
5524a27d AK |
803 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
804 | hash = hpt_hash(vpn, shift, ssize); | |
3c726f8d BH |
805 | hidx = __rpte_to_hidx(pte, index); |
806 | if (hidx & _PTEIDX_SECONDARY) | |
807 | hash = ~hash; | |
808 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
809 | slot += hidx & _PTEIDX_GROUP_IX; | |
810 | hptep = htab_address + slot; | |
74f227b2 | 811 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
27d8959d AK |
812 | hpte_v = hpte_get_old_v(hptep); |
813 | ||
814 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) | |
815 | continue; | |
816 | /* lock and try again */ | |
3c726f8d | 817 | native_lock_hpte(hptep); |
a833280b | 818 | hpte_v = hpte_get_old_v(hptep); |
27d8959d AK |
819 | |
820 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) | |
3c726f8d BH |
821 | native_unlock_hpte(hptep); |
822 | else | |
823 | hptep->v = 0; | |
27d8959d | 824 | |
3c726f8d | 825 | } pte_iterate_hashed_end(); |
1da177e4 LT |
826 | } |
827 | ||
88b1bf72 | 828 | if (use_local) { |
1da177e4 | 829 | asm volatile("ptesync":::"memory"); |
3c726f8d | 830 | for (i = 0; i < number; i++) { |
5524a27d | 831 | vpn = batch->vpn[i]; |
3c726f8d BH |
832 | pte = batch->pte[i]; |
833 | ||
5524a27d AK |
834 | pte_iterate_hashed_subpages(pte, psize, |
835 | vpn, index, shift) { | |
b1022fbd | 836 | __tlbiel(vpn, psize, psize, ssize); |
3c726f8d BH |
837 | } pte_iterate_hashed_end(); |
838 | } | |
1da177e4 LT |
839 | asm volatile("ptesync":::"memory"); |
840 | } else { | |
44ae3ab3 | 841 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
1da177e4 LT |
842 | |
843 | if (lock_tlbie) | |
6b9c9b8a | 844 | raw_spin_lock(&native_tlbie_lock); |
1da177e4 LT |
845 | |
846 | asm volatile("ptesync":::"memory"); | |
3c726f8d | 847 | for (i = 0; i < number; i++) { |
5524a27d | 848 | vpn = batch->vpn[i]; |
3c726f8d BH |
849 | pte = batch->pte[i]; |
850 | ||
5524a27d AK |
851 | pte_iterate_hashed_subpages(pte, psize, |
852 | vpn, index, shift) { | |
b1022fbd | 853 | __tlbie(vpn, psize, psize, ssize); |
3c726f8d BH |
854 | } pte_iterate_hashed_end(); |
855 | } | |
a5d4b589 AK |
856 | /* |
857 | * Just do one more with the last used values. | |
858 | */ | |
859 | fixup_tlbie(vpn, psize, psize, ssize); | |
1da177e4 LT |
860 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
861 | ||
862 | if (lock_tlbie) | |
6b9c9b8a | 863 | raw_spin_unlock(&native_tlbie_lock); |
1da177e4 LT |
864 | } |
865 | ||
866 | local_irq_restore(flags); | |
867 | } | |
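The point of native_flush_hash_range() is to amortise the synchronisation cost: one ptesync before and one eieio/tlbsync/ptesync after a whole batch of invalidations, instead of paying them per page as tlbie() does. A miniature model of that batching, with printf standing in for the barriers and the tlbie itself; the struct and its size are hypothetical, not the kernel's ppc64_tlb_batch layout:

```c
#include <stdio.h>

#define BATCH_MAX 16

struct tlb_batch {
	unsigned long vpn[BATCH_MAX];
	int n;
};

/* Flush everything queued in the batch with a single pair of barriers. */
static void flush_batch(struct tlb_batch *b)
{
	int i;

	printf("ptesync\n");			/* order the preceding HPTE updates */
	for (i = 0; i < b->n; i++)
		printf("tlbie vpn=0x%lx\n", b->vpn[i]);
	printf("eieio; tlbsync; ptesync\n");	/* one completion barrier for all */
	b->n = 0;
}

int main(void)
{
	struct tlb_batch b = { .n = 0 };
	unsigned long v;

	for (v = 0x1000; v < 0x1004; v++)
		b.vpn[b.n++] = v;
	flush_batch(&b);
	return 0;
}
```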
868 | ||
7d0daae4 | 869 | void __init hpte_init_native(void) |
1da177e4 | 870 | { |
7025776e BH |
871 | mmu_hash_ops.hpte_invalidate = native_hpte_invalidate; |
872 | mmu_hash_ops.hpte_updatepp = native_hpte_updatepp; | |
873 | mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp; | |
1b644f57 | 874 | mmu_hash_ops.hpte_removebolted = native_hpte_removebolted; |
7025776e BH |
875 | mmu_hash_ops.hpte_insert = native_hpte_insert; |
876 | mmu_hash_ops.hpte_remove = native_hpte_remove; | |
877 | mmu_hash_ops.hpte_clear_all = native_hpte_clear; | |
878 | mmu_hash_ops.flush_hash_range = native_flush_hash_range; | |
879 | mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate; | |
1da177e4 | 880 | } |
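hpte_init_native() does nothing but fill in the mmu_hash_ops dispatch table, so the generic hash-MMU code can call whichever backend (native, pSeries hypervisor, and so on) was selected at boot without knowing which one it is. A much-reduced sketch of that ops-table pattern; the struct and function names here are hypothetical stand-ins, not the real mmu_hash_ops layout:

```c
#include <stdio.h>

/* Two-entry stand-in for the real ops table. */
struct hash_ops {
	long (*hpte_insert)(unsigned long group);
	void (*hpte_invalidate)(unsigned long slot);
};

static long native_insert(unsigned long group)
{
	printf("native insert into group %lu\n", group);
	return 0;
}

static void native_invalidate(unsigned long slot)
{
	printf("native invalidate of slot %lu\n", slot);
}

static struct hash_ops mmu_ops;

/* What hpte_init_native() does, in spirit: point every op at this backend. */
static void init_native(void)
{
	mmu_ops.hpte_insert = native_insert;
	mmu_ops.hpte_invalidate = native_invalidate;
}

int main(void)
{
	init_native();
	mmu_ops.hpte_insert(42);
	mmu_ops.hpte_invalidate(7);
	return 0;
}
```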