/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
# define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate that a CPU has its hash page table (HPT) managed
 * within the host kernel
 */
#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)

/*
 * SLB handling
 */

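/*
 * Look up the SLB entry, if any, that translates the given effective
 * address.  Each valid entry maps one effective segment (256MB, or 1TB
 * on MMUs with 1T segment support) to a virtual segment ID, so the
 * effective address is masked down to its segment base and compared
 * against each entry's ESID together with the segment-size (B) field
 * of the VSID word.
 */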
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

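/*
 * slbia invalidates every SLB entry except slot 0, while slbie
 * invalidates only the entry translating the given effective address.
 * Either way the corresponding shadow TLB entries become stale, and
 * since QEMU cannot yet flush by segment, a full TLB flush is queued
 * whenever a valid entry is dropped.
 */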
void helper_slbia(CPUPPCState *env)
{
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush = 1;
        }
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush = 1;
    }
}

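/*
 * Install an SLB entry.  'esid' carries the effective segment ID plus
 * the valid bit, 'vsid' the virtual segment ID plus the segment-size
 * (B) and protection/LLP flags.  The store is rejected if the slot is
 * out of range, reserved ESID bits are set, the segment size is
 * unsupported, or the LLP (page size) encoding does not match any of
 * the page sizes this CPU model advertises in env->sps.
 */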
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */
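
/*
 * On 64-bit hash MMUs, SDR1 holds the real address of the hash page
 * table (HTABORG) and an HTABSIZE field in its low bits giving the
 * table size as 2^(18 + HTABSIZE) bytes.  htab_mask is derived as the
 * number of 128-byte PTE groups minus one, hence the
 * (htabsize + 18 - 7) shift below.
 */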
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
                         Error **errp)
{
    CPUPPCState *env = &cpu->env;
    target_ulong htabsize = value & SDR_64_HTABSIZE;

    env->spr[SPR_SDR1] = value;
    if (htabsize > 28) {
        error_setg(errp,
                   "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
                   htabsize);
        htabsize = 28;
    }
    env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
    env->htab_base = value & SDR_64_HTABORG;
}

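/*
 * Point the CPU at a hash page table that does not live in guest
 * memory: either one allocated by QEMU (hpt != NULL) or one managed
 * entirely inside the host kernel by KVM (hpt == NULL, recorded with
 * the MMU_HASH64_KVM_MANAGED_HPT marker).  SDR1 is still set, to the
 * host pointer plus the size encoding, and pushed to KVM when KVM is
 * in use.
 */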
void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
                                 Error **errp)
{
    CPUPPCState *env = &cpu->env;
    Error *local_err = NULL;

    if (hpt) {
        env->external_htab = hpt;
    } else {
        env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
    }
    ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
                        &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Not strictly necessary, but makes it clearer that an external
     * htab is in use when debugging */
    env->htab_base = -1;

    if (kvm_enabled()) {
        if (kvmppc_put_books_sregs(cpu) < 0) {
            error_setg(errp, "Unable to update SDR1 in KVM");
        }
    }
}

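/*
 * Compute the RWX protection granted by a PTE.  The effective key is
 * taken from the SLB entry (Ks for supervisor, Kp for problem state)
 * and combined with the PP bits of the PTE:
 *
 *   key 0: pp 0/1/2 -> RW,  pp 3/6 -> RO
 *   key 1: pp 2     -> RW,  pp 1/3 -> RO,  pp 0/6 -> no access
 *
 * Execute permission is derived separately from the no-execute (N)
 * and guarded (G) bits.
 */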
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either the noexec (N) or guarded (G) bit is set
     * in the PTE, or the segment is marked no-execute */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

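/*
 * Apply Virtual Page Class Key Protection.  The HPTE carries a 5-bit
 * class key; the corresponding two-bit field of the AMR says whether
 * stores (bit 0x2) and/or loads (bit 0x1) to pages of that class are
 * forbidden, so the matching PAGE_* rights are stripped from the
 * protection computed so far.
 */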
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

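/*
 * The hash table can live in one of three places: inside the host
 * kernel when KVM manages the HPT, in a QEMU-allocated buffer
 * (external_htab), or in guest memory at htab_base.  start_access
 * returns an opaque token through which a PTE group can be read; for
 * the KVM case this involves copying the PTEG out of the kernel, and
 * stop_access releases that copy again.
 */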
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
    } else if (cpu->env.external_htab) {
        /*
         * HTAB is controlled by QEMU. Just point to the internally
         * accessible PTEG.
         */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
{
    if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_free_pteg(token);
    }
}

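/*
 * Scan one PTE group for a match.  A PTEG holds HPTES_PER_GROUP
 * (eight) 16-byte HPTEs; an entry matches if it is valid, its
 * secondary-hash flag agrees with the hash being tried, and its
 * abbreviated virtual page number compares equal to the search key.
 * Returns the byte offset of the matching HPTE within the table, or
 * -1 if the group contains no match.
 */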
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(cpu, token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(cpu, token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

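/*
 * Translate an effective address to the offset of its HPTE.  The
 * primary hash is the VSID XORed with the page number within the
 * segment; for 1TB segments the VSID is additionally folded in
 * shifted left by 25 bits.  If the primary PTE group has no match,
 * the secondary group at the complemented hash value is searched as
 * well.
 */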
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", env->htab_base,
                      env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

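/*
 * Recover the actual page size of an HPTE.  Without HPTE64_V_LARGE
 * the entry is a plain 4kiB page (only legal in a 4kiB segment); with
 * it, the page size is encoded in the low-order bits of the RPN field
 * and must match one of the (page_shift, pte_enc) pairs allowed by
 * the segment's page size descriptor.  Returns the page shift, or 0
 * on a bad encoding.
 */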
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1,
                                          unsigned *seg_page_shift)
{
    CPUPPCState *env = &cpu->env;
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        *seg_page_shift = 12;
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            *seg_page_shift = sps->page_shift;
            return shift;
        }
    }

    *seg_page_shift = 0;
    return 0;
}

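/*
 * Full translation path for a guest access: handle real mode, look up
 * the SLB entry, check segment-level no-execute, search the hash
 * table, validate the page size, combine PP and AMR protection,
 * update the referenced/changed bits, and finally install the
 * translation in the QEMU TLB.  Returns 0 on success, or 1 with
 * cs->exception_index and the relevant SPRs set up for the fault.
 */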
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but a
         * machine check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

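    /* The PP/key protection and the AMR protection are computed
     * independently and intersected: the access must be allowed by
     * both.  On a data access violation, DSISR distinguishes an
     * ordinary protection fault (0x08000000), a failing store
     * (0x02000000) and a virtual page class key fault (0x00200000). */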
    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

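/*
 * Write one HPTE back, using whichever backend currently holds the
 * hash table: through the KVM interface when the kernel manages the
 * HPT, directly into the QEMU-allocated buffer for an external HPT,
 * or into guest physical memory at htab_base otherwise.
 */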
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}