/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "sysemu/kvm.h"
24 #include "mmu-hash64.h"
/* Debug logging helpers: compiled out unless the DEBUG_MMU / DEBUG_SLB
 * macros are defined, so the hot path carries no logging cost. */
#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(env) log_cpu_state((env), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif
43 struct mmu_ctx_hash64
{
44 hwaddr raddr
; /* Real address */
45 hwaddr eaddr
; /* Effective address */
46 int prot
; /* Protection bits */
47 hwaddr hash
[2]; /* Pagetable hash values */
48 target_ulong ptem
; /* Virtual segment ID | API */
49 int key
; /* Access key */
56 static ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
58 uint64_t esid_256M
, esid_1T
;
61 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
63 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
64 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
66 for (n
= 0; n
< env
->slb_nr
; n
++) {
67 ppc_slb_t
*slb
= &env
->slb
[n
];
69 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
70 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
71 /* We check for 1T matches on all MMUs here - if the MMU
72 * doesn't have 1T segment support, we will have prevented 1T
73 * entries from being inserted in the slbmte code. */
74 if (((slb
->esid
== esid_256M
) &&
75 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
76 || ((slb
->esid
== esid_1T
) &&
77 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
85 void dump_slb(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
90 cpu_synchronize_state(env
);
92 cpu_fprintf(f
, "SLB\tESID\t\t\tVSID\n");
93 for (i
= 0; i
< env
->slb_nr
; i
++) {
94 slbe
= env
->slb
[i
].esid
;
95 slbv
= env
->slb
[i
].vsid
;
96 if (slbe
== 0 && slbv
== 0) {
99 cpu_fprintf(f
, "%d\t0x%016" PRIx64
"\t0x%016" PRIx64
"\n",
104 void helper_slbia(CPUPPCState
*env
)
106 int n
, do_invalidate
;
109 /* XXX: Warning: slbia never invalidates the first segment */
110 for (n
= 1; n
< env
->slb_nr
; n
++) {
111 ppc_slb_t
*slb
= &env
->slb
[n
];
113 if (slb
->esid
& SLB_ESID_V
) {
114 slb
->esid
&= ~SLB_ESID_V
;
115 /* XXX: given the fact that segment size is 256 MB or 1TB,
116 * and we still don't have a tlb_flush_mask(env, n, mask)
117 * in QEMU, we just invalidate all TLBs
127 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
131 slb
= slb_lookup(env
, addr
);
136 if (slb
->esid
& SLB_ESID_V
) {
137 slb
->esid
&= ~SLB_ESID_V
;
139 /* XXX: given the fact that segment size is 256 MB or 1TB,
140 * and we still don't have a tlb_flush_mask(env, n, mask)
141 * in QEMU, we just invalidate all TLBs
147 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
149 int slot
= rb
& 0xfff;
150 ppc_slb_t
*slb
= &env
->slb
[slot
];
152 if (rb
& (0x1000 - env
->slb_nr
)) {
153 return -1; /* Reserved bits set or slot too high */
155 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
156 return -1; /* Bad segment size */
158 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
159 return -1; /* 1T segment on MMU that doesn't support it */
162 /* Mask out the slot number as we store the entry */
163 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
166 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
167 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
168 slb
->esid
, slb
->vsid
);
173 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
176 int slot
= rb
& 0xfff;
177 ppc_slb_t
*slb
= &env
->slb
[slot
];
179 if (slot
>= env
->slb_nr
) {
187 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
190 int slot
= rb
& 0xfff;
191 ppc_slb_t
*slb
= &env
->slb
[slot
];
193 if (slot
>= env
->slb_nr
) {
201 void helper_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
203 if (ppc_store_slb(env
, rb
, rs
) < 0) {
204 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
209 target_ulong
helper_load_slb_esid(CPUPPCState
*env
, target_ulong rb
)
213 if (ppc_load_slb_esid(env
, rb
, &rt
) < 0) {
214 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
220 target_ulong
helper_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
)
224 if (ppc_load_slb_vsid(env
, rb
, &rt
) < 0) {
225 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
/*
 * 64-bit hash table MMU handling
 */

/* Bits of PTE word 1 that must agree between duplicate matches:
 * the RPN plus the low WIMG/PP flag bits. */
#define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
237 static int ppc_hash64_pp_check(int key
, int pp
, bool nx
)
241 /* Compute access rights */
242 /* When pp is 4, 5 or 7, the result is undefined. Set it to noaccess */
249 access
|= PAGE_WRITE
;
267 access
= PAGE_READ
| PAGE_WRITE
;
278 static int ppc_hash64_check_prot(int prot
, int rwx
)
283 if (prot
& PAGE_EXEC
) {
288 } else if (rwx
== 1) {
289 if (prot
& PAGE_WRITE
) {
295 if (prot
& PAGE_READ
) {
305 static int pte64_check(struct mmu_ctx_hash64
*ctx
, target_ulong pte0
,
306 target_ulong pte1
, int h
, int rwx
)
312 /* Check validity and table match */
313 if ((pte0
& HPTE64_V_VALID
) && (h
== !!(pte0
& HPTE64_V_SECONDARY
))) {
316 /* Check vsid & api */
317 mmask
= PTE64_CHECK_MASK
;
318 pp
= (pte1
& HPTE64_R_PP
) | ((pte1
& HPTE64_R_PP0
) >> 61);
319 /* No execute if either noexec or guarded bits set */
320 nx
= (pte1
& HPTE64_R_N
) || (pte1
& HPTE64_R_G
);
321 if (HPTE64_V_COMPARE(pte0
, ctx
->ptem
)) {
322 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
323 /* all matches should have equal RPN, WIMG & PP */
324 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
325 qemu_log("Bad RPN/WIMG/PP\n");
329 /* Compute access rights */
330 access
= ppc_hash64_pp_check(ctx
->key
, pp
, nx
);
331 /* Keep the matching PTE informations */
334 ret
= ppc_hash64_check_prot(ctx
->prot
, rwx
);
337 LOG_MMU("PTE access granted !\n");
339 /* Access right violation */
340 LOG_MMU("PTE access rejected\n");
348 static int ppc_hash64_pte_update_flags(struct mmu_ctx_hash64
*ctx
,
354 /* Update page flags */
355 if (!(*pte1p
& HPTE64_R_R
)) {
356 /* Update accessed flag */
357 *pte1p
|= HPTE64_R_R
;
360 if (!(*pte1p
& HPTE64_R_C
)) {
361 if (rw
== 1 && ret
== 0) {
362 /* Update changed flag */
363 *pte1p
|= HPTE64_R_C
;
366 /* Force page fault for first write access */
367 ctx
->prot
&= ~PAGE_WRITE
;
374 /* PTE table lookup */
375 static int find_pte64(CPUPPCState
*env
, struct mmu_ctx_hash64
*ctx
, int h
,
376 int rwx
, int target_page_bits
)
379 target_ulong pte0
, pte1
;
383 ret
= -1; /* No entry found */
384 pteg_off
= (ctx
->hash
[h
] * HASH_PTEG_SIZE_64
) & env
->htab_mask
;
385 for (i
= 0; i
< HPTES_PER_GROUP
; i
++) {
386 pte0
= ppc_hash64_load_hpte0(env
, pteg_off
+ i
*HASH_PTE_SIZE_64
);
387 pte1
= ppc_hash64_load_hpte1(env
, pteg_off
+ i
*HASH_PTE_SIZE_64
);
389 r
= pte64_check(ctx
, pte0
, pte1
, h
, rwx
);
390 LOG_MMU("Load pte from %016" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
391 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
392 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
393 (int)((pte0
>> 1) & 1), ctx
->ptem
);
396 /* PTE inconsistency */
399 /* Access violation */
409 /* XXX: we should go on looping to check all PTEs consistency
410 * but if we can speed-up the whole thing as the
411 * result would be undefined if PTEs are not consistent.
420 LOG_MMU("found PTE at addr %08" HWADDR_PRIx
" prot=%01x ret=%d\n",
421 ctx
->raddr
, ctx
->prot
, ret
);
422 /* Update page flags */
424 if (ppc_hash64_pte_update_flags(ctx
, &pte1
, ret
, rwx
) == 1) {
425 ppc_hash64_store_hpte1(env
, pteg_off
+ good
* HASH_PTE_SIZE_64
, pte1
);
429 /* We have a TLB that saves 4K pages, so let's
430 * split a huge page to 4k chunks */
431 if (target_page_bits
!= TARGET_PAGE_BITS
) {
432 ctx
->raddr
|= (ctx
->eaddr
& ((1 << target_page_bits
) - 1))
438 static int get_segment64(CPUPPCState
*env
, struct mmu_ctx_hash64
*ctx
,
439 target_ulong eaddr
, int rwx
)
443 int pr
, target_page_bits
;
449 target_ulong pageaddr
;
452 LOG_MMU("Check SLBs\n");
453 slb
= slb_lookup(env
, eaddr
);
458 if (slb
->vsid
& SLB_VSID_B
) {
459 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
462 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
466 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
467 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
468 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
469 : (slb
->vsid
& SLB_VSID_KS
));
471 pageaddr
= eaddr
& ((1ULL << segment_bits
)
472 - (1ULL << target_page_bits
));
473 if (slb
->vsid
& SLB_VSID_B
) {
474 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
476 hash
= vsid
^ (pageaddr
>> target_page_bits
);
478 /* Only 5 bits of the page index are used in the AVPN */
479 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
480 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
482 LOG_MMU("pte segment: key=%d nx %d vsid " TARGET_FMT_lx
"\n",
483 ctx
->key
, !!(slb
->vsid
& SLB_VSID_N
), vsid
);
486 /* Check if instruction fetch is allowed, if needed */
487 if (rwx
!= 2 || !(slb
->vsid
& SLB_VSID_N
)) {
488 /* Page address translation */
489 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
490 " hash " TARGET_FMT_plx
"\n",
491 env
->htab_base
, env
->htab_mask
, hash
);
493 ctx
->hash
[1] = ~hash
;
495 /* Initialize real address with an invalid value */
496 ctx
->raddr
= (hwaddr
)-1ULL;
497 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
498 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
499 " hash=" TARGET_FMT_plx
"\n",
500 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
502 /* Primary table lookup */
503 ret
= find_pte64(env
, ctx
, 0, rwx
, target_page_bits
);
505 /* Secondary table lookup */
506 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
507 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
508 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
509 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
510 ret2
= find_pte64(env
, ctx
, 1, rwx
, target_page_bits
);
516 LOG_MMU("No access allowed\n");
523 static int ppc_hash64_get_physical_address(CPUPPCState
*env
,
524 struct mmu_ctx_hash64
*ctx
,
525 target_ulong eaddr
, int rwx
)
527 bool real_mode
= (rwx
== 2 && msr_ir
== 0)
528 || (rwx
!= 2 && msr_dr
== 0);
531 ctx
->raddr
= eaddr
& 0x0FFFFFFFFFFFFFFFULL
;
532 ctx
->prot
= PAGE_READ
| PAGE_EXEC
| PAGE_WRITE
;
535 return get_segment64(env
, ctx
, eaddr
, rwx
);
539 hwaddr
ppc_hash64_get_phys_page_debug(CPUPPCState
*env
, target_ulong addr
)
541 struct mmu_ctx_hash64 ctx
;
543 if (unlikely(ppc_hash64_get_physical_address(env
, &ctx
, addr
, 0) != 0)) {
547 return ctx
.raddr
& TARGET_PAGE_MASK
;
550 int ppc_hash64_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rwx
,
553 struct mmu_ctx_hash64 ctx
;
556 ret
= ppc_hash64_get_physical_address(env
, &ctx
, address
, rwx
);
558 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
559 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
560 mmu_idx
, TARGET_PAGE_SIZE
);
562 } else if (ret
< 0) {
567 env
->exception_index
= POWERPC_EXCP_ISI
;
568 env
->error_code
= 0x40000000;
571 /* Access rights violation */
572 env
->exception_index
= POWERPC_EXCP_ISI
;
573 env
->error_code
= 0x08000000;
576 /* No execute protection violation */
577 env
->exception_index
= POWERPC_EXCP_ISI
;
578 env
->error_code
= 0x10000000;
581 /* No match in segment table */
582 env
->exception_index
= POWERPC_EXCP_ISEG
;
589 /* No matches in page tables or TLB */
590 env
->exception_index
= POWERPC_EXCP_DSI
;
592 env
->spr
[SPR_DAR
] = address
;
594 env
->spr
[SPR_DSISR
] = 0x42000000;
596 env
->spr
[SPR_DSISR
] = 0x40000000;
600 /* Access rights violation */
601 env
->exception_index
= POWERPC_EXCP_DSI
;
603 env
->spr
[SPR_DAR
] = address
;
605 env
->spr
[SPR_DSISR
] = 0x0A000000;
607 env
->spr
[SPR_DSISR
] = 0x08000000;
611 /* No match in segment table */
612 env
->exception_index
= POWERPC_EXCP_DSEG
;
614 env
->spr
[SPR_DAR
] = address
;
619 printf("%s: set exception to %d %02x\n", __func__
,
620 env
->exception
, env
->error_code
);