/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate that a CPU has its hash page table (HPT) managed
 * within the host kernel
 */
#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)

/*
 * SLB handling
 */

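/*
 * Search the SLB for an entry covering the given effective address.
 * A 256MiB-segment and a 1TiB-segment candidate ESID are both computed
 * up front; an entry only matches if its B (segment size) field agrees
 * with the candidate it is compared against.  Returns NULL on a miss.
 */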
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

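/*
 * Helpers backing the slbia/slbie instructions.  Invalidating an SLB
 * entry only clears its V bit; the corresponding QEMU TLB entries are
 * dropped by flushing the whole TLB (see the XXX notes below).
 */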
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

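/*
 * Write one SLB entry.  The slot number, ESID and VSID are validated
 * (slot range, reserved ESID bits, segment size, 1T support), and the
 * VSID's LLP bits must match one of the base page size encodings this
 * CPU advertises; otherwise the store is rejected with -1.
 */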
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

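/*
 * Read back the ESID or VSID half of an SLB entry, as used by the
 * slbmfee/slbmfev instructions.  An out-of-range slot number is
 * reported as -1 so the caller can raise an invalid-operand program
 * interrupt.
 */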
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */
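
/*
 * SDR1 holds the hashed page table's base (HTABORG) and its size
 * (HTABSIZE): the table spans 2^(18 + HTABSIZE) bytes, 256KiB at
 * minimum.  Each PTE group (PTEG) is 8 * 16 = 128 = 2^7 bytes, so the
 * PTEG-index mask is (1 << (HTABSIZE + 18 - 7)) - 1.  For example,
 * HTABSIZE = 0 gives a 256KiB table with 2048 PTEGs and a mask of 0x7ff.
 */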
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;

    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}

void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
                                 Error **errp)
{
    CPUPPCState *env = &cpu->env;
    Error *local_err = NULL;

    cpu_synchronize_state(CPU(cpu));

    if (hpt) {
        env->external_htab = hpt;
    } else {
        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
    }
    ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
                        &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Not strictly necessary, but makes it clearer that an external
     * htab is in use when debugging */
    env->htab_base = -1;

    if (kvm_enabled()) {
        if (kvmppc_put_books_sregs(cpu) < 0) {
            error_setg(errp, "Unable to update SDR1 in KVM");
        }
    }
}

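/*
 * Derive PAGE_READ/PAGE_WRITE permissions from the PTE's PP bits
 * combined with the key selected by the SLB entry (KP in problem
 * state, KS in supervisor state):
 *
 *   key 0: pp 0/1/2 -> read/write,  pp 3/6 -> read only
 *   key 1: pp 0/6   -> no access,   pp 1/3 -> read only,  pp 2 -> read/write
 *
 * Undefined pp combinations fall through as no access.
 */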
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

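/*
 * Apply Virtual Page Class Key Protection (AMR).  The PTE's key field
 * selects a 2-bit field in the AMR register: bit 0b10 forbids stores
 * and bit 0b01 forbids loads for that class, so the corresponding
 * PAGE_WRITE / PAGE_READ rights are stripped from the otherwise full
 * protection.
 */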
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

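/*
 * ppc_hash64_start_access() returns a "token" through which the PTEs
 * of one PTE group can be read, hiding where the hash table actually
 * lives: a buffer fetched from a KVM-managed HPT, a QEMU-allocated
 * external HPT, or guest memory addressed via htab_base.  The token is
 * released again with ppc_hash64_stop_access().
 */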
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
    } else if (cpu->env.external_htab) {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_free_pteg(token);
    }
}

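/*
 * Scan one PTE group for a valid entry whose secondary-hash flag and
 * abbreviated VPN match what we are looking for.  Returns the byte
 * offset of the matching PTE within the hash table, or -1 if the group
 * contains no match.
 */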
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(cpu, token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(cpu, token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

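/*
 * Full hash table lookup for one effective address.  The primary hash
 * is vsid ^ (epn >> page_shift) for 256MiB segments, and additionally
 * folds in (vsid << 25) for 1TiB segments; the secondary hash is the
 * one's complement of the primary.  Both PTE groups are searched in
 * turn, and the offset of the winning PTE (or -1) is returned.
 */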
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", env->htab_base,
                      env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

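/*
 * Recover the actual page size of an HPTE.  If the L bit is clear the
 * entry maps a plain 4kiB page (valid only in a 4kiB-base segment);
 * otherwise the low RPN bits are matched against the per-segment
 * pte_enc encodings to find the large page shift.  Returns 0 for an
 * encoding this segment size does not support.
 */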
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1,
                                          unsigned *seg_page_shift)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            *seg_page_shift = sps->page_shift;
            return shift;
        }
    }

    *seg_page_shift = 0;
    return 0;
}

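/*
 * Top-level translation for a TCG access (rwx: 0 = load, 1 = store,
 * 2 = instruction fetch).  Real-mode accesses are mapped straight
 * through with the top four effective address bits masked; otherwise
 * the SLB and hash table are walked and the appropriate segment, ISI,
 * DSI or machine-check exception state is set up on failure.  On
 * success the translation is installed in the QEMU TLB and 0 is
 * returned; 1 means exception state has been set up for the caller to
 * raise.
 */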
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but a
         * machine check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

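/*
 * Address translation for the debug path (gdbstub/monitor): the same
 * SLB and hash table walk as above, but with no side effects and no
 * exceptions.  Returns -1 if the address cannot currently be
 * translated.
 */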
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

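/*
 * Write back one HPTE, dispatching on where the hash table lives:
 * through KVM for a kernel-managed HPT, directly into the external
 * HPT buffer, or into guest memory at htab_base.
 */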
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}
798 | } |