git.proxmox.com Git mirror of qemu.git — target-ppc/mmu-hash64.c
(at commit: "mmu-hash64: Remove nx from mmu_ctx_hash64")
1 /*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (c) 2013 David Gibson, IBM Corporation
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20 #include "cpu.h"
21 #include "helper.h"
22 #include "sysemu/kvm.h"
23 #include "kvm_ppc.h"
24 #include "mmu-hash64.h"
25
26 //#define DEBUG_MMU
27 //#define DEBUG_SLB
28
29 #ifdef DEBUG_MMU
30 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
31 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
32 #else
33 # define LOG_MMU(...) do { } while (0)
34 # define LOG_MMU_STATE(...) do { } while (0)
35 #endif
36
37 #ifdef DEBUG_SLB
38 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
39 #else
40 # define LOG_SLB(...) do { } while (0)
41 #endif
42
/* Per-translation scratch context for a single 64-bit hash MMU lookup.
 * Filled in by get_segment64()/find_pte64() and read back by the
 * fault handler / debug translation entry points. */
struct mmu_ctx_hash64 {
    hwaddr raddr;      /* Real address */
    hwaddr eaddr;      /* Effective address */
    int prot;          /* Protection bits */
    hwaddr hash[2];    /* Pagetable hash values */
    target_ulong ptem; /* Virtual segment ID | API */
    int key;           /* Access key */
};
51
52 /*
53 * SLB handling
54 */
55
56 static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
57 {
58 uint64_t esid_256M, esid_1T;
59 int n;
60
61 LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
62
63 esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
64 esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
65
66 for (n = 0; n < env->slb_nr; n++) {
67 ppc_slb_t *slb = &env->slb[n];
68
69 LOG_SLB("%s: slot %d %016" PRIx64 " %016"
70 PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
71 /* We check for 1T matches on all MMUs here - if the MMU
72 * doesn't have 1T segment support, we will have prevented 1T
73 * entries from being inserted in the slbmte code. */
74 if (((slb->esid == esid_256M) &&
75 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
76 || ((slb->esid == esid_1T) &&
77 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
78 return slb;
79 }
80 }
81
82 return NULL;
83 }
84
85 void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
86 {
87 int i;
88 uint64_t slbe, slbv;
89
90 cpu_synchronize_state(env);
91
92 cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
93 for (i = 0; i < env->slb_nr; i++) {
94 slbe = env->slb[i].esid;
95 slbv = env->slb[i].vsid;
96 if (slbe == 0 && slbv == 0) {
97 continue;
98 }
99 cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
100 i, slbe, slbv);
101 }
102 }
103
104 void helper_slbia(CPUPPCState *env)
105 {
106 int n, do_invalidate;
107
108 do_invalidate = 0;
109 /* XXX: Warning: slbia never invalidates the first segment */
110 for (n = 1; n < env->slb_nr; n++) {
111 ppc_slb_t *slb = &env->slb[n];
112
113 if (slb->esid & SLB_ESID_V) {
114 slb->esid &= ~SLB_ESID_V;
115 /* XXX: given the fact that segment size is 256 MB or 1TB,
116 * and we still don't have a tlb_flush_mask(env, n, mask)
117 * in QEMU, we just invalidate all TLBs
118 */
119 do_invalidate = 1;
120 }
121 }
122 if (do_invalidate) {
123 tlb_flush(env, 1);
124 }
125 }
126
127 void helper_slbie(CPUPPCState *env, target_ulong addr)
128 {
129 ppc_slb_t *slb;
130
131 slb = slb_lookup(env, addr);
132 if (!slb) {
133 return;
134 }
135
136 if (slb->esid & SLB_ESID_V) {
137 slb->esid &= ~SLB_ESID_V;
138
139 /* XXX: given the fact that segment size is 256 MB or 1TB,
140 * and we still don't have a tlb_flush_mask(env, n, mask)
141 * in QEMU, we just invalidate all TLBs
142 */
143 tlb_flush(env, 1);
144 }
145 }
146
147 int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
148 {
149 int slot = rb & 0xfff;
150 ppc_slb_t *slb = &env->slb[slot];
151
152 if (rb & (0x1000 - env->slb_nr)) {
153 return -1; /* Reserved bits set or slot too high */
154 }
155 if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
156 return -1; /* Bad segment size */
157 }
158 if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
159 return -1; /* 1T segment on MMU that doesn't support it */
160 }
161
162 /* Mask out the slot number as we store the entry */
163 slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
164 slb->vsid = rs;
165
166 LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
167 " %016" PRIx64 "\n", __func__, slot, rb, rs,
168 slb->esid, slb->vsid);
169
170 return 0;
171 }
172
173 static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
174 target_ulong *rt)
175 {
176 int slot = rb & 0xfff;
177 ppc_slb_t *slb = &env->slb[slot];
178
179 if (slot >= env->slb_nr) {
180 return -1;
181 }
182
183 *rt = slb->esid;
184 return 0;
185 }
186
187 static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
188 target_ulong *rt)
189 {
190 int slot = rb & 0xfff;
191 ppc_slb_t *slb = &env->slb[slot];
192
193 if (slot >= env->slb_nr) {
194 return -1;
195 }
196
197 *rt = slb->vsid;
198 return 0;
199 }
200
201 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
202 {
203 if (ppc_store_slb(env, rb, rs) < 0) {
204 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
205 POWERPC_EXCP_INVAL);
206 }
207 }
208
209 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
210 {
211 target_ulong rt = 0;
212
213 if (ppc_load_slb_esid(env, rb, &rt) < 0) {
214 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
215 POWERPC_EXCP_INVAL);
216 }
217 return rt;
218 }
219
220 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
221 {
222 target_ulong rt = 0;
223
224 if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
225 helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
226 POWERPC_EXCP_INVAL);
227 }
228 return rt;
229 }
230
231 /*
232 * 64-bit hash table MMU handling
233 */
234
/* Bits of PTE word 1 that must agree between duplicate matches:
 * the real page number plus the low 7 flag bits (presumably the
 * WIMG/N/PP fields — see pte64_check(); confirm against HPTE64_R_*). */
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
236
237 static int ppc_hash64_pp_check(int key, int pp, bool nx)
238 {
239 int access;
240
241 /* Compute access rights */
242 /* When pp is 4, 5 or 7, the result is undefined. Set it to noaccess */
243 access = 0;
244 if (key == 0) {
245 switch (pp) {
246 case 0x0:
247 case 0x1:
248 case 0x2:
249 access |= PAGE_WRITE;
250 /* No break here */
251 case 0x3:
252 case 0x6:
253 access |= PAGE_READ;
254 break;
255 }
256 } else {
257 switch (pp) {
258 case 0x0:
259 case 0x6:
260 access = 0;
261 break;
262 case 0x1:
263 case 0x3:
264 access = PAGE_READ;
265 break;
266 case 0x2:
267 access = PAGE_READ | PAGE_WRITE;
268 break;
269 }
270 }
271 if (!nx) {
272 access |= PAGE_EXEC;
273 }
274
275 return access;
276 }
277
278 static int ppc_hash64_check_prot(int prot, int rwx)
279 {
280 int ret;
281
282 if (rwx == 2) {
283 if (prot & PAGE_EXEC) {
284 ret = 0;
285 } else {
286 ret = -2;
287 }
288 } else if (rwx == 1) {
289 if (prot & PAGE_WRITE) {
290 ret = 0;
291 } else {
292 ret = -2;
293 }
294 } else {
295 if (prot & PAGE_READ) {
296 ret = 0;
297 } else {
298 ret = -2;
299 }
300 }
301
302 return ret;
303 }
304
/* Match one hash PTE against the translation context.
 *
 * @ctx: lookup context (ptem to compare against; raddr/prot updated on match)
 * @pte0/@pte1: the two 64-bit words of the candidate PTE
 * @h: which hash table we are scanning (0 = primary, 1 = secondary);
 *     must agree with the PTE's SECONDARY bit
 * @rwx: requested access (0 = read, 1 = write, 2 = execute)
 *
 * Returns:
 *   0  — PTE matches and the access is granted (ctx->raddr/prot set)
 *  -1  — no match (invalid, wrong table, or ptem mismatch)
 *  -2  — PTE matches but the access violates its protection
 *  -3  — inconsistent duplicate: a second match disagrees on RPN/WIMG/PP
 */
static int pte64_check(struct mmu_ctx_hash64 *ctx, target_ulong pte0,
                       target_ulong pte1, int h, int rwx)
{
    target_ulong mmask;
    int access, ret, pp;

    ret = -1;
    /* Check validity and table match */
    if ((pte0 & HPTE64_V_VALID) && (h == !!(pte0 & HPTE64_V_SECONDARY))) {
        bool nx;

        /* Check vsid & api */
        mmask = PTE64_CHECK_MASK;
        /* pp is split: two low bits plus the PP0 bit shifted down */
        pp = (pte1 & HPTE64_R_PP) | ((pte1 & HPTE64_R_PP0) >> 61);
        /* No execute if either noexec or guarded bits set */
        nx = (pte1 & HPTE64_R_N) || (pte1 & HPTE64_R_G);
        if (HPTE64_V_COMPARE(pte0, ctx->ptem)) {
            /* ctx->raddr != -1 means an earlier PTE already matched;
             * duplicates must agree on the masked fields */
            if (ctx->raddr != (hwaddr)-1ULL) {
                /* all matches should have equal RPN, WIMG & PP */
                if ((ctx->raddr & mmask) != (pte1 & mmask)) {
                    qemu_log("Bad RPN/WIMG/PP\n");
                    return -3;
                }
            }
            /* Compute access rights */
            access = ppc_hash64_pp_check(ctx->key, pp, nx);
            /* Keep the matching PTE informations */
            ctx->raddr = pte1;
            ctx->prot = access;
            ret = ppc_hash64_check_prot(ctx->prot, rwx);
            if (ret == 0) {
                /* Access granted */
                LOG_MMU("PTE access granted !\n");
            } else {
                /* Access right violation */
                LOG_MMU("PTE access rejected\n");
            }
        }
    }

    return ret;
}
347
348 static int ppc_hash64_pte_update_flags(struct mmu_ctx_hash64 *ctx,
349 target_ulong *pte1p,
350 int ret, int rw)
351 {
352 int store = 0;
353
354 /* Update page flags */
355 if (!(*pte1p & HPTE64_R_R)) {
356 /* Update accessed flag */
357 *pte1p |= HPTE64_R_R;
358 store = 1;
359 }
360 if (!(*pte1p & HPTE64_R_C)) {
361 if (rw == 1 && ret == 0) {
362 /* Update changed flag */
363 *pte1p |= HPTE64_R_C;
364 store = 1;
365 } else {
366 /* Force page fault for first write access */
367 ctx->prot &= ~PAGE_WRITE;
368 }
369 }
370
371 return store;
372 }
373
/* PTE table lookup: scan one PTE group (primary when h == 0, secondary
 * when h == 1) for a PTE matching ctx->ptem, using the hash previously
 * stored in ctx->hash[h].
 *
 * Returns 0 on a granted match (ctx->raddr/prot filled in), -2 on an
 * access-rights violation, -1 when no PTE matched or the group was
 * inconsistent. On any match the R/C flags are updated and written
 * back to the hash table if needed.
 */
static int find_pte64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx, int h,
                      int rwx, int target_page_bits)
{
    hwaddr pteg_off;
    target_ulong pte0, pte1;
    int i, good = -1;
    int ret, r;

    ret = -1; /* No entry found */
    pteg_off = (ctx->hash[h] * HASH_PTEG_SIZE_64) & env->htab_mask;
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(env, pteg_off + i*HASH_PTE_SIZE_64);
        pte1 = ppc_hash64_load_hpte1(env, pteg_off + i*HASH_PTE_SIZE_64);

        r = pte64_check(ctx, pte0, pte1, h, rwx);
        LOG_MMU("Load pte from %016" HWADDR_PRIx " => " TARGET_FMT_lx " "
                TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
                pteg_off + (i * 16), pte0, pte1, (int)(pte0 & 1), h,
                (int)((pte0 >> 1) & 1), ctx->ptem);
        switch (r) {
        case -3:
            /* PTE inconsistency */
            return -1;
        case -2:
            /* Access violation: remember the entry but keep scanning in
             * case a later PTE grants the access */
            ret = -2;
            good = i;
            break;
        case -1:
        default:
            /* No PTE match */
            break;
        case 0:
            /* access granted */
            /* XXX: we should go on looping to check all PTEs consistency
             * but if we can speed-up the whole thing as the
             * result would be undefined if PTEs are not consistent.
             */
            ret = 0;
            good = i;
            /* NB: jumps into the if body below, skipping its condition */
            goto done;
        }
    }
    if (good != -1) {
    done:
        LOG_MMU("found PTE at addr %08" HWADDR_PRIx " prot=%01x ret=%d\n",
                ctx->raddr, ctx->prot, ret);
        /* Update page flags */
        pte1 = ctx->raddr;
        if (ppc_hash64_pte_update_flags(ctx, &pte1, ret, rwx) == 1) {
            ppc_hash64_store_hpte1(env, pteg_off + good * HASH_PTE_SIZE_64, pte1);
        }
    }

    /* We have a TLB that saves 4K pages, so let's
     * split a huge page to 4k chunks */
    if (target_page_bits != TARGET_PAGE_BITS) {
        ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
                      & TARGET_PAGE_MASK;
    }
    return ret;
}
437
/* Translate @eaddr through the SLB and hash page table.
 *
 * Looks up the SLB entry, derives the VSID, page size, protection key
 * and hash values, then searches the primary (and if needed secondary)
 * PTE group via find_pte64().
 *
 * Returns 0 on success (ctx filled in), -1 when no PTE matched, -2 on
 * an access-rights violation, -3 when an instruction fetch hits a
 * no-execute segment, -5 when no SLB entry covers the address.
 */
static int get_segment64(CPUPPCState *env, struct mmu_ctx_hash64 *ctx,
                         target_ulong eaddr, int rwx)
{
    hwaddr hash;
    target_ulong vsid;
    int pr, target_page_bits;
    int ret, ret2;

    pr = msr_pr;
    ctx->eaddr = eaddr;
    ppc_slb_t *slb;
    target_ulong pageaddr;
    int segment_bits;

    LOG_MMU("Check SLBs\n");
    slb = slb_lookup(env, eaddr);
    if (!slb) {
        /* No SLB entry covers this address -> segment fault */
        return -5;
    }

    /* SLB_VSID_B set means a 1TB segment, clear means 256MB */
    if (slb->vsid & SLB_VSID_B) {
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        segment_bits = 40;
    } else {
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        segment_bits = 28;
    }

    /* L bit selects the large (16M) page size for this segment */
    target_page_bits = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    /* Pick the Kp or Ks key depending on the current privilege level */
    ctx->key = !!(pr ? (slb->vsid & SLB_VSID_KP)
                  : (slb->vsid & SLB_VSID_KS));

    /* Page index within the segment, already shifted into place */
    pageaddr = eaddr & ((1ULL << segment_bits)
                        - (1ULL << target_page_bits));
    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment hash mixes the VSID twice */
        hash = vsid ^ (vsid << 25) ^ (pageaddr >> target_page_bits);
    } else {
        hash = vsid ^ (pageaddr >> target_page_bits);
    }
    /* Only 5 bits of the page index are used in the AVPN */
    ctx->ptem = (slb->vsid & SLB_VSID_PTEM) |
        ((pageaddr >> 16) & ((1ULL << segment_bits) - 0x80));

    LOG_MMU("pte segment: key=%d nx %d vsid " TARGET_FMT_lx "\n",
            ctx->key, !!(slb->vsid & SLB_VSID_N), vsid);
    ret = -1;

    /* Check if instruction fetch is allowed, if needed */
    if (rwx != 2 || !(slb->vsid & SLB_VSID_N)) {
        /* Page address translation */
        LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                " hash " TARGET_FMT_plx "\n",
                env->htab_base, env->htab_mask, hash);
        /* Secondary hash is the one's complement of the primary hash */
        ctx->hash[0] = hash;
        ctx->hash[1] = ~hash;

        /* Initialize real address with an invalid value */
        ctx->raddr = (hwaddr)-1ULL;
        LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n",
                env->htab_base, env->htab_mask, vsid, ctx->ptem,
                ctx->hash[0]);
        /* Primary table lookup */
        ret = find_pte64(env, ctx, 0, rwx, target_page_bits);
        if (ret < 0) {
            /* Secondary table lookup */
            LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                    " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                    " hash=" TARGET_FMT_plx "\n", env->htab_base,
                    env->htab_mask, vsid, ctx->ptem, ctx->hash[1]);
            ret2 = find_pte64(env, ctx, 1, rwx, target_page_bits);
            if (ret2 != -1) {
                ret = ret2;
            }
        }
    } else {
        /* Instruction fetch from a no-execute segment */
        LOG_MMU("No access allowed\n");
        ret = -3;
    }

    return ret;
}
522
523 static int ppc_hash64_get_physical_address(CPUPPCState *env,
524 struct mmu_ctx_hash64 *ctx,
525 target_ulong eaddr, int rwx)
526 {
527 bool real_mode = (rwx == 2 && msr_ir == 0)
528 || (rwx != 2 && msr_dr == 0);
529
530 if (real_mode) {
531 ctx->raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
532 ctx->prot = PAGE_READ | PAGE_EXEC | PAGE_WRITE;
533 return 0;
534 } else {
535 return get_segment64(env, ctx, eaddr, rwx);
536 }
537 }
538
539 hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
540 {
541 struct mmu_ctx_hash64 ctx;
542
543 if (unlikely(ppc_hash64_get_physical_address(env, &ctx, addr, 0) != 0)) {
544 return -1;
545 }
546
547 return ctx.raddr & TARGET_PAGE_MASK;
548 }
549
550 int ppc_hash64_handle_mmu_fault(CPUPPCState *env, target_ulong address, int rwx,
551 int mmu_idx)
552 {
553 struct mmu_ctx_hash64 ctx;
554 int ret = 0;
555
556 ret = ppc_hash64_get_physical_address(env, &ctx, address, rwx);
557 if (ret == 0) {
558 tlb_set_page(env, address & TARGET_PAGE_MASK,
559 ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
560 mmu_idx, TARGET_PAGE_SIZE);
561 ret = 0;
562 } else if (ret < 0) {
563 LOG_MMU_STATE(env);
564 if (rwx == 2) {
565 switch (ret) {
566 case -1:
567 env->exception_index = POWERPC_EXCP_ISI;
568 env->error_code = 0x40000000;
569 break;
570 case -2:
571 /* Access rights violation */
572 env->exception_index = POWERPC_EXCP_ISI;
573 env->error_code = 0x08000000;
574 break;
575 case -3:
576 /* No execute protection violation */
577 env->exception_index = POWERPC_EXCP_ISI;
578 env->error_code = 0x10000000;
579 break;
580 case -5:
581 /* No match in segment table */
582 env->exception_index = POWERPC_EXCP_ISEG;
583 env->error_code = 0;
584 break;
585 }
586 } else {
587 switch (ret) {
588 case -1:
589 /* No matches in page tables or TLB */
590 env->exception_index = POWERPC_EXCP_DSI;
591 env->error_code = 0;
592 env->spr[SPR_DAR] = address;
593 if (rwx == 1) {
594 env->spr[SPR_DSISR] = 0x42000000;
595 } else {
596 env->spr[SPR_DSISR] = 0x40000000;
597 }
598 break;
599 case -2:
600 /* Access rights violation */
601 env->exception_index = POWERPC_EXCP_DSI;
602 env->error_code = 0;
603 env->spr[SPR_DAR] = address;
604 if (rwx == 1) {
605 env->spr[SPR_DSISR] = 0x0A000000;
606 } else {
607 env->spr[SPR_DSISR] = 0x08000000;
608 }
609 break;
610 case -5:
611 /* No match in segment table */
612 env->exception_index = POWERPC_EXCP_DSEG;
613 env->error_code = 0;
614 env->spr[SPR_DAR] = address;
615 break;
616 }
617 }
618 #if 0
619 printf("%s: set exception to %d %02x\n", __func__,
620 env->exception, env->error_code);
621 #endif
622 ret = 1;
623 }
624
625 return ret;
626 }