2 * PowerPC emulation helpers for qemu.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
26 #include "helper_regs.h"
27 #include "qemu-common.h"
35 //#define DEBUG_SOFTWARE_TLB
36 //#define DUMP_PAGE_TABLES
37 //#define DEBUG_EXCEPTIONS
38 //#define FLUSH_ALL_TLBS
41 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
42 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
44 # define LOG_MMU(...) do { } while (0)
45 # define LOG_MMU_STATE(...) do { } while (0)
49 #ifdef DEBUG_SOFTWARE_TLB
50 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
52 # define LOG_SWTLB(...) do { } while (0)
56 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
58 # define LOG_BATS(...) do { } while (0)
62 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
64 # define LOG_SLB(...) do { } while (0)
67 #ifdef DEBUG_EXCEPTIONS
68 # define LOG_EXCP(...) qemu_log(__VA_ARGS__)
70 # define LOG_EXCP(...) do { } while (0)
73 /*****************************************************************************/
74 /* PowerPC Hypercall emulation */
76 void (*cpu_ppc_hypercall
)(CPUState
*);
78 /*****************************************************************************/
79 /* PowerPC MMU emulation */
81 #if defined(CONFIG_USER_ONLY)
82 int cpu_ppc_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
85 int exception
, error_code
;
88 exception
= POWERPC_EXCP_ISI
;
89 error_code
= 0x40000000;
91 exception
= POWERPC_EXCP_DSI
;
92 error_code
= 0x40000000;
94 error_code
|= 0x02000000;
95 env
->spr
[SPR_DAR
] = address
;
96 env
->spr
[SPR_DSISR
] = error_code
;
98 env
->exception_index
= exception
;
99 env
->error_code
= error_code
;
105 /* Common routines used by software and hardware TLBs emulation */
106 static inline int pte_is_valid(target_ulong pte0
)
108 return pte0
& 0x80000000 ? 1 : 0;
111 static inline void pte_invalidate(target_ulong
*pte0
)
113 *pte0
&= ~0x80000000;
116 #if defined(TARGET_PPC64)
117 static inline int pte64_is_valid(target_ulong pte0
)
119 return pte0
& 0x0000000000000001ULL
? 1 : 0;
122 static inline void pte64_invalidate(target_ulong
*pte0
)
124 *pte0
&= ~0x0000000000000001ULL
;
128 #define PTE_PTEM_MASK 0x7FFFFFBF
129 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
130 #if defined(TARGET_PPC64)
131 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
132 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
135 static inline int pp_check(int key
, int pp
, int nx
)
139 /* Compute access rights */
140 /* When pp is 3/7, the result is undefined. Set it to noaccess */
147 access
|= PAGE_WRITE
;
165 access
= PAGE_READ
| PAGE_WRITE
;
175 static inline int check_prot(int prot
, int rw
, int access_type
)
179 if (access_type
== ACCESS_CODE
) {
180 if (prot
& PAGE_EXEC
)
185 if (prot
& PAGE_WRITE
)
190 if (prot
& PAGE_READ
)
199 static inline int _pte_check(mmu_ctx_t
*ctx
, int is_64b
, target_ulong pte0
,
200 target_ulong pte1
, int h
, int rw
, int type
)
202 target_ulong ptem
, mmask
;
203 int access
, ret
, pteh
, ptev
, pp
;
206 /* Check validity and table match */
207 #if defined(TARGET_PPC64)
209 ptev
= pte64_is_valid(pte0
);
210 pteh
= (pte0
>> 1) & 1;
214 ptev
= pte_is_valid(pte0
);
215 pteh
= (pte0
>> 6) & 1;
217 if (ptev
&& h
== pteh
) {
218 /* Check vsid & api */
219 #if defined(TARGET_PPC64)
221 ptem
= pte0
& PTE64_PTEM_MASK
;
222 mmask
= PTE64_CHECK_MASK
;
223 pp
= (pte1
& 0x00000003) | ((pte1
>> 61) & 0x00000004);
224 ctx
->nx
= (pte1
>> 2) & 1; /* No execute bit */
225 ctx
->nx
|= (pte1
>> 3) & 1; /* Guarded bit */
229 ptem
= pte0
& PTE_PTEM_MASK
;
230 mmask
= PTE_CHECK_MASK
;
231 pp
= pte1
& 0x00000003;
233 if (ptem
== ctx
->ptem
) {
234 if (ctx
->raddr
!= (target_phys_addr_t
)-1ULL) {
235 /* all matches should have equal RPN, WIMG & PP */
236 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
237 qemu_log("Bad RPN/WIMG/PP\n");
241 /* Compute access rights */
242 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
243 /* Keep the matching PTE informations */
246 ret
= check_prot(ctx
->prot
, rw
, type
);
249 LOG_MMU("PTE access granted !\n");
251 /* Access right violation */
252 LOG_MMU("PTE access rejected\n");
260 static inline int pte32_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
261 target_ulong pte1
, int h
, int rw
, int type
)
263 return _pte_check(ctx
, 0, pte0
, pte1
, h
, rw
, type
);
266 #if defined(TARGET_PPC64)
267 static inline int pte64_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
268 target_ulong pte1
, int h
, int rw
, int type
)
270 return _pte_check(ctx
, 1, pte0
, pte1
, h
, rw
, type
);
274 static inline int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
279 /* Update page flags */
280 if (!(*pte1p
& 0x00000100)) {
281 /* Update accessed flag */
282 *pte1p
|= 0x00000100;
285 if (!(*pte1p
& 0x00000080)) {
286 if (rw
== 1 && ret
== 0) {
287 /* Update changed flag */
288 *pte1p
|= 0x00000080;
291 /* Force page fault for first write access */
292 ctx
->prot
&= ~PAGE_WRITE
;
299 /* Software driven TLB helpers */
300 static inline int ppc6xx_tlb_getnum(CPUState
*env
, target_ulong eaddr
, int way
,
305 /* Select TLB num in a way from address */
306 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
308 nr
+= env
->tlb_per_way
* way
;
309 /* 6xx have separate TLBs for instructions and data */
310 if (is_code
&& env
->id_tlbs
== 1)
316 static inline void ppc6xx_tlb_invalidate_all(CPUState
*env
)
321 //LOG_SWTLB("Invalidate all TLBs\n");
322 /* Invalidate all defined software TLB */
324 if (env
->id_tlbs
== 1)
326 for (nr
= 0; nr
< max
; nr
++) {
327 tlb
= &env
->tlb
.tlb6
[nr
];
328 pte_invalidate(&tlb
->pte0
);
333 static inline void __ppc6xx_tlb_invalidate_virt(CPUState
*env
,
335 int is_code
, int match_epn
)
337 #if !defined(FLUSH_ALL_TLBS)
341 /* Invalidate ITLB + DTLB, all ways */
342 for (way
= 0; way
< env
->nb_ways
; way
++) {
343 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
344 tlb
= &env
->tlb
.tlb6
[nr
];
345 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
346 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
348 pte_invalidate(&tlb
->pte0
);
349 tlb_flush_page(env
, tlb
->EPN
);
353 /* XXX: PowerPC specification say this is valid as well */
354 ppc6xx_tlb_invalidate_all(env
);
358 static inline void ppc6xx_tlb_invalidate_virt(CPUState
*env
,
359 target_ulong eaddr
, int is_code
)
361 __ppc6xx_tlb_invalidate_virt(env
, eaddr
, is_code
, 0);
364 void ppc6xx_tlb_store (CPUState
*env
, target_ulong EPN
, int way
, int is_code
,
365 target_ulong pte0
, target_ulong pte1
)
370 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
371 tlb
= &env
->tlb
.tlb6
[nr
];
372 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
373 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
374 /* Invalidate any pending reference in Qemu for this virtual address */
375 __ppc6xx_tlb_invalidate_virt(env
, EPN
, is_code
, 1);
379 /* Store last way for LRU mechanism */
383 static inline int ppc6xx_tlb_check(CPUState
*env
, mmu_ctx_t
*ctx
,
384 target_ulong eaddr
, int rw
, int access_type
)
391 ret
= -1; /* No TLB found */
392 for (way
= 0; way
< env
->nb_ways
; way
++) {
393 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
394 access_type
== ACCESS_CODE
? 1 : 0);
395 tlb
= &env
->tlb
.tlb6
[nr
];
396 /* This test "emulates" the PTE index match for hardware TLBs */
397 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
398 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
399 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
400 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
401 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
404 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
405 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
406 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
407 tlb
->EPN
, eaddr
, tlb
->pte1
,
408 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
409 switch (pte32_check(ctx
, tlb
->pte0
, tlb
->pte1
, 0, rw
, access_type
)) {
411 /* TLB inconsistency */
414 /* Access violation */
424 /* XXX: we should go on looping to check all TLBs consistency
425 * but we can speed-up the whole thing as the
426 * result would be undefined if TLBs are not consistent.
435 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
436 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
437 /* Update page flags */
438 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
444 /* Perform BAT hit & translation */
445 static inline void bat_size_prot(CPUState
*env
, target_ulong
*blp
, int *validp
,
446 int *protp
, target_ulong
*BATu
,
452 bl
= (*BATu
& 0x00001FFC) << 15;
455 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
456 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
458 pp
= *BATl
& 0x00000003;
460 prot
= PAGE_READ
| PAGE_EXEC
;
470 static inline void bat_601_size_prot(CPUState
*env
, target_ulong
*blp
,
471 int *validp
, int *protp
,
472 target_ulong
*BATu
, target_ulong
*BATl
)
475 int key
, pp
, valid
, prot
;
477 bl
= (*BATl
& 0x0000003F) << 17;
478 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx
" msk " TARGET_FMT_lx
"\n",
479 (uint8_t)(*BATl
& 0x0000003F), bl
, ~bl
);
481 valid
= (*BATl
>> 6) & 1;
483 pp
= *BATu
& 0x00000003;
485 key
= (*BATu
>> 3) & 1;
487 key
= (*BATu
>> 2) & 1;
488 prot
= pp_check(key
, pp
, 0);
495 static inline int get_bat(CPUState
*env
, mmu_ctx_t
*ctx
, target_ulong
virtual,
498 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
499 target_ulong BEPIl
, BEPIu
, bl
;
503 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
504 type
== ACCESS_CODE
? 'I' : 'D', virtual);
507 BATlt
= env
->IBAT
[1];
508 BATut
= env
->IBAT
[0];
511 BATlt
= env
->DBAT
[1];
512 BATut
= env
->DBAT
[0];
515 for (i
= 0; i
< env
->nb_BATs
; i
++) {
518 BEPIu
= *BATu
& 0xF0000000;
519 BEPIl
= *BATu
& 0x0FFE0000;
520 if (unlikely(env
->mmu_model
== POWERPC_MMU_601
)) {
521 bat_601_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
523 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
525 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
526 " BATl " TARGET_FMT_lx
"\n", __func__
,
527 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
528 if ((virtual & 0xF0000000) == BEPIu
&&
529 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
532 /* Get physical address */
533 ctx
->raddr
= (*BATl
& 0xF0000000) |
534 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
535 (virtual & 0x0001F000);
536 /* Compute access rights */
538 ret
= check_prot(ctx
->prot
, rw
, type
);
540 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
541 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
542 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
548 #if defined(DEBUG_BATS)
549 if (qemu_log_enabled()) {
550 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
551 for (i
= 0; i
< 4; i
++) {
554 BEPIu
= *BATu
& 0xF0000000;
555 BEPIl
= *BATu
& 0x0FFE0000;
556 bl
= (*BATu
& 0x00001FFC) << 15;
557 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
558 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
559 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
560 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
561 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
570 static inline target_phys_addr_t
get_pteg_offset(CPUState
*env
,
571 target_phys_addr_t hash
,
574 return (hash
* pte_size
* 8) & env
->htab_mask
;
577 /* PTE table lookup */
578 static inline int _find_pte(CPUState
*env
, mmu_ctx_t
*ctx
, int is_64b
, int h
,
579 int rw
, int type
, int target_page_bits
)
581 target_phys_addr_t pteg_off
;
582 target_ulong pte0
, pte1
;
586 ret
= -1; /* No entry found */
587 pteg_off
= get_pteg_offset(env
, ctx
->hash
[h
],
588 is_64b
? HASH_PTE_SIZE_64
: HASH_PTE_SIZE_32
);
589 for (i
= 0; i
< 8; i
++) {
590 #if defined(TARGET_PPC64)
592 if (env
->external_htab
) {
593 pte0
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16));
594 pte1
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16) + 8);
596 pte0
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16));
597 pte1
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16) + 8);
600 /* We have a TLB that saves 4K pages, so let's
601 * split a huge page to 4k chunks */
602 if (target_page_bits
!= TARGET_PAGE_BITS
)
603 pte1
|= (ctx
->eaddr
& (( 1 << target_page_bits
) - 1))
606 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
607 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
608 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
609 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
610 (int)((pte0
>> 1) & 1), ctx
->ptem
);
614 if (env
->external_htab
) {
615 pte0
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8));
616 pte1
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8) + 4);
618 pte0
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8));
619 pte1
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8) + 4);
621 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
622 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
623 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
624 pteg_off
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
625 (int)((pte0
>> 6) & 1), ctx
->ptem
);
629 /* PTE inconsistency */
632 /* Access violation */
642 /* XXX: we should go on looping to check all PTEs consistency
643 * but if we can speed-up the whole thing as the
644 * result would be undefined if PTEs are not consistent.
653 LOG_MMU("found PTE at addr " TARGET_FMT_lx
" prot=%01x ret=%d\n",
654 ctx
->raddr
, ctx
->prot
, ret
);
655 /* Update page flags */
657 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
658 #if defined(TARGET_PPC64)
660 if (env
->external_htab
) {
661 stq_p(env
->external_htab
+ pteg_off
+ (good
* 16) + 8,
664 stq_phys_notdirty(env
->htab_base
+ pteg_off
+
665 (good
* 16) + 8, pte1
);
670 if (env
->external_htab
) {
671 stl_p(env
->external_htab
+ pteg_off
+ (good
* 8) + 4,
674 stl_phys_notdirty(env
->htab_base
+ pteg_off
+
675 (good
* 8) + 4, pte1
);
684 static inline int find_pte(CPUState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
685 int type
, int target_page_bits
)
687 #if defined(TARGET_PPC64)
688 if (env
->mmu_model
& POWERPC_MMU_64
)
689 return _find_pte(env
, ctx
, 1, h
, rw
, type
, target_page_bits
);
692 return _find_pte(env
, ctx
, 0, h
, rw
, type
, target_page_bits
);
695 #if defined(TARGET_PPC64)
696 static inline ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
698 uint64_t esid_256M
, esid_1T
;
701 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
703 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
704 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
706 for (n
= 0; n
< env
->slb_nr
; n
++) {
707 ppc_slb_t
*slb
= &env
->slb
[n
];
709 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
710 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
711 /* We check for 1T matches on all MMUs here - if the MMU
712 * doesn't have 1T segment support, we will have prevented 1T
713 * entries from being inserted in the slbmte code. */
714 if (((slb
->esid
== esid_256M
) &&
715 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
716 || ((slb
->esid
== esid_1T
) &&
717 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
725 void ppc_slb_invalidate_all (CPUPPCState
*env
)
727 int n
, do_invalidate
;
730 /* XXX: Warning: slbia never invalidates the first segment */
731 for (n
= 1; n
< env
->slb_nr
; n
++) {
732 ppc_slb_t
*slb
= &env
->slb
[n
];
734 if (slb
->esid
& SLB_ESID_V
) {
735 slb
->esid
&= ~SLB_ESID_V
;
736 /* XXX: given the fact that segment size is 256 MB or 1TB,
737 * and we still don't have a tlb_flush_mask(env, n, mask)
738 * in Qemu, we just invalidate all TLBs
747 void ppc_slb_invalidate_one (CPUPPCState
*env
, uint64_t T0
)
751 slb
= slb_lookup(env
, T0
);
756 if (slb
->esid
& SLB_ESID_V
) {
757 slb
->esid
&= ~SLB_ESID_V
;
759 /* XXX: given the fact that segment size is 256 MB or 1TB,
760 * and we still don't have a tlb_flush_mask(env, n, mask)
761 * in Qemu, we just invalidate all TLBs
767 int ppc_store_slb (CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
769 int slot
= rb
& 0xfff;
770 ppc_slb_t
*slb
= &env
->slb
[slot
];
772 if (rb
& (0x1000 - env
->slb_nr
)) {
773 return -1; /* Reserved bits set or slot too high */
775 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
776 return -1; /* Bad segment size */
778 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
779 return -1; /* 1T segment on MMU that doesn't support it */
782 /* Mask out the slot number as we store the entry */
783 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
786 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
787 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
788 slb
->esid
, slb
->vsid
);
793 int ppc_load_slb_esid (CPUPPCState
*env
, target_ulong rb
, target_ulong
*rt
)
795 int slot
= rb
& 0xfff;
796 ppc_slb_t
*slb
= &env
->slb
[slot
];
798 if (slot
>= env
->slb_nr
) {
806 int ppc_load_slb_vsid (CPUPPCState
*env
, target_ulong rb
, target_ulong
*rt
)
808 int slot
= rb
& 0xfff;
809 ppc_slb_t
*slb
= &env
->slb
[slot
];
811 if (slot
>= env
->slb_nr
) {
818 #endif /* defined(TARGET_PPC64) */
820 /* Perform segment based translation */
821 static inline int get_segment(CPUState
*env
, mmu_ctx_t
*ctx
,
822 target_ulong eaddr
, int rw
, int type
)
824 target_phys_addr_t hash
;
826 int ds
, pr
, target_page_bits
;
831 #if defined(TARGET_PPC64)
832 if (env
->mmu_model
& POWERPC_MMU_64
) {
834 target_ulong pageaddr
;
837 LOG_MMU("Check SLBs\n");
838 slb
= slb_lookup(env
, eaddr
);
843 if (slb
->vsid
& SLB_VSID_B
) {
844 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
847 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
851 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
852 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
853 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
854 : (slb
->vsid
& SLB_VSID_KS
));
856 ctx
->nx
= !!(slb
->vsid
& SLB_VSID_N
);
858 pageaddr
= eaddr
& ((1ULL << segment_bits
)
859 - (1ULL << target_page_bits
));
860 if (slb
->vsid
& SLB_VSID_B
) {
861 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
863 hash
= vsid
^ (pageaddr
>> target_page_bits
);
865 /* Only 5 bits of the page index are used in the AVPN */
866 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
867 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
869 #endif /* defined(TARGET_PPC64) */
871 target_ulong sr
, pgidx
;
873 sr
= env
->sr
[eaddr
>> 28];
874 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
875 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
876 ds
= sr
& 0x80000000 ? 1 : 0;
877 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
878 vsid
= sr
& 0x00FFFFFF;
879 target_page_bits
= TARGET_PAGE_BITS
;
880 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
881 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
882 " ir=%d dr=%d pr=%d %d t=%d\n",
883 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
884 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
885 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
887 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
889 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
890 ctx
->key
, ds
, ctx
->nx
, vsid
);
893 /* Check if instruction fetch is allowed, if needed */
894 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
895 /* Page address translation */
896 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
897 " hash " TARGET_FMT_plx
"\n",
898 env
->htab_base
, env
->htab_mask
, hash
);
900 ctx
->hash
[1] = ~hash
;
902 /* Initialize real address with an invalid value */
903 ctx
->raddr
= (target_phys_addr_t
)-1ULL;
904 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
905 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
906 /* Software TLB search */
907 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
909 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
910 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
911 " hash=" TARGET_FMT_plx
"\n",
912 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
914 /* Primary table lookup */
915 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
917 /* Secondary table lookup */
918 if (eaddr
!= 0xEFFFFFFF)
919 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
920 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
921 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
922 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
923 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
929 #if defined (DUMP_PAGE_TABLES)
930 if (qemu_log_enabled()) {
931 target_phys_addr_t curaddr
;
932 uint32_t a0
, a1
, a2
, a3
;
933 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
934 "\n", sdr
, mask
+ 0x80);
935 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
937 a0
= ldl_phys(curaddr
);
938 a1
= ldl_phys(curaddr
+ 4);
939 a2
= ldl_phys(curaddr
+ 8);
940 a3
= ldl_phys(curaddr
+ 12);
941 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
942 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
943 curaddr
, a0
, a1
, a2
, a3
);
949 LOG_MMU("No access allowed\n");
954 LOG_MMU("direct store...\n");
955 /* Direct-store segment : absolutely *BUGGY* for now */
957 /* Direct-store implies a 32-bit MMU.
958 * Check the Segment Register's bus unit ID (BUID).
960 sr
= env
->sr
[eaddr
>> 28];
961 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
962 /* Memory-forced I/O controller interface access */
963 /* If T=1 and BUID=x'07F', the 601 performs a memory access
964 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
966 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
967 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
973 /* Integer load/store : only access allowed */
976 /* No code fetch is allowed in direct-store areas */
979 /* Floating point load/store */
982 /* lwarx, ldarx or srwcx. */
985 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
986 /* Should make the instruction do no-op.
987 * As it already do no-op, it's quite easy :-)
995 qemu_log("ERROR: instruction should not need "
996 "address translation\n");
999 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1010 /* Generic TLB check function for embedded PowerPC implementations */
1011 int ppcemb_tlb_check(CPUState
*env
, ppcemb_tlb_t
*tlb
,
1012 target_phys_addr_t
*raddrp
,
1013 target_ulong address
, uint32_t pid
, int ext
,
1018 /* Check valid flag */
1019 if (!(tlb
->prot
& PAGE_VALID
)) {
1022 mask
= ~(tlb
->size
- 1);
1023 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1024 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1025 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
1027 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
)
1029 /* Check effective address */
1030 if ((address
& mask
) != tlb
->EPN
)
1032 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1033 #if (TARGET_PHYS_ADDR_BITS >= 36)
1035 /* Extend the physical address to 36 bits */
1036 *raddrp
|= (target_phys_addr_t
)(tlb
->RPN
& 0xF) << 32;
1043 /* Generic TLB search function for PowerPC embedded implementations */
1044 int ppcemb_tlb_search (CPUPPCState
*env
, target_ulong address
, uint32_t pid
)
1047 target_phys_addr_t raddr
;
1050 /* Default return value is no match */
1052 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1053 tlb
= &env
->tlb
.tlbe
[i
];
1054 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1063 /* Helpers specific to PowerPC 40x implementations */
1064 static inline void ppc4xx_tlb_invalidate_all(CPUState
*env
)
1069 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1070 tlb
= &env
->tlb
.tlbe
[i
];
1071 tlb
->prot
&= ~PAGE_VALID
;
1076 static inline void ppc4xx_tlb_invalidate_virt(CPUState
*env
,
1077 target_ulong eaddr
, uint32_t pid
)
1079 #if !defined(FLUSH_ALL_TLBS)
1081 target_phys_addr_t raddr
;
1082 target_ulong page
, end
;
1085 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1086 tlb
= &env
->tlb
.tlbe
[i
];
1087 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1088 end
= tlb
->EPN
+ tlb
->size
;
1089 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
)
1090 tlb_flush_page(env
, page
);
1091 tlb
->prot
&= ~PAGE_VALID
;
1096 ppc4xx_tlb_invalidate_all(env
);
1100 static int mmu40x_get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
,
1101 target_ulong address
, int rw
, int access_type
)
1104 target_phys_addr_t raddr
;
1105 int i
, ret
, zsel
, zpr
, pr
;
1108 raddr
= (target_phys_addr_t
)-1ULL;
1110 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1111 tlb
= &env
->tlb
.tlbe
[i
];
1112 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1113 env
->spr
[SPR_40x_PID
], 0, i
) < 0)
1115 zsel
= (tlb
->attr
>> 4) & 0xF;
1116 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
1117 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1118 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1119 /* Check execute enable bit */
1126 /* All accesses granted */
1127 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1132 /* Raise Zone protection fault. */
1133 env
->spr
[SPR_40x_ESR
] = 1 << 22;
1141 /* Check from TLB entry */
1142 ctx
->prot
= tlb
->prot
;
1143 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1145 env
->spr
[SPR_40x_ESR
] = 0;
1150 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1151 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1156 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1157 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1162 void store_40x_sler (CPUPPCState
*env
, uint32_t val
)
1164 /* XXX: TO BE FIXED */
1165 if (val
!= 0x00000000) {
1166 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1168 env
->spr
[SPR_405_SLER
] = val
;
1171 static inline int mmubooke_check_tlb (CPUState
*env
, ppcemb_tlb_t
*tlb
,
1172 target_phys_addr_t
*raddr
, int *prot
,
1173 target_ulong address
, int rw
,
1174 int access_type
, int i
)
1178 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1179 env
->spr
[SPR_BOOKE_PID
],
1180 !env
->nb_pids
, i
) >= 0) {
1184 if (env
->spr
[SPR_BOOKE_PID1
] &&
1185 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1186 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
1190 if (env
->spr
[SPR_BOOKE_PID2
] &&
1191 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1192 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
1196 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1202 _prot
= tlb
->prot
& 0xF;
1204 _prot
= (tlb
->prot
>> 4) & 0xF;
1207 /* Check the address space */
1208 if (access_type
== ACCESS_CODE
) {
1209 if (msr_ir
!= (tlb
->attr
& 1)) {
1210 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1215 if (_prot
& PAGE_EXEC
) {
1216 LOG_SWTLB("%s: good TLB!\n", __func__
);
1220 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, _prot
);
1223 if (msr_dr
!= (tlb
->attr
& 1)) {
1224 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1229 if ((!rw
&& _prot
& PAGE_READ
) || (rw
&& (_prot
& PAGE_WRITE
))) {
1230 LOG_SWTLB("%s: found TLB!\n", __func__
);
1234 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, _prot
);
1241 static int mmubooke_get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
,
1242 target_ulong address
, int rw
,
1246 target_phys_addr_t raddr
;
1250 raddr
= (target_phys_addr_t
)-1ULL;
1251 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1252 tlb
= &env
->tlb
.tlbe
[i
];
1253 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
1262 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1263 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1266 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1267 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1273 void booke206_flush_tlb(CPUState
*env
, int flags
, const int check_iprot
)
1277 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
1279 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1280 if (flags
& (1 << i
)) {
1281 tlb_size
= booke206_tlb_size(env
, i
);
1282 for (j
= 0; j
< tlb_size
; j
++) {
1283 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
1284 tlb
[j
].mas1
&= ~MAS1_VALID
;
1288 tlb
+= booke206_tlb_size(env
, i
);
1294 target_phys_addr_t
booke206_tlb_to_page_size(CPUState
*env
, ppcmas_tlb_t
*tlb
)
1297 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
1300 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
1301 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1303 return 1024ULL << tlbm_size
;
1306 /* TLB check function for MAS based SoftTLBs */
1307 int ppcmas_tlb_check(CPUState
*env
, ppcmas_tlb_t
*tlb
,
1308 target_phys_addr_t
*raddrp
,
1309 target_ulong address
, uint32_t pid
)
1314 /* Check valid flag */
1315 if (!(tlb
->mas1
& MAS1_VALID
)) {
1319 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
1320 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
1321 PRIx64
" mask=0x" TARGET_FMT_lx
" MAS7_3=0x%" PRIx64
" MAS8=%x\n",
1322 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
, tlb
->mas7_3
,
1326 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
1327 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
1331 /* Check effective address */
1332 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
1337 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
1343 static int mmubooke206_check_tlb(CPUState
*env
, ppcmas_tlb_t
*tlb
,
1344 target_phys_addr_t
*raddr
, int *prot
,
1345 target_ulong address
, int rw
,
1351 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1352 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1356 if (env
->spr
[SPR_BOOKE_PID1
] &&
1357 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1358 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1362 if (env
->spr
[SPR_BOOKE_PID2
] &&
1363 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1364 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1368 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1374 if (tlb
->mas7_3
& MAS3_UR
) {
1377 if (tlb
->mas7_3
& MAS3_UW
) {
1378 _prot
|= PAGE_WRITE
;
1380 if (tlb
->mas7_3
& MAS3_UX
) {
1384 if (tlb
->mas7_3
& MAS3_SR
) {
1387 if (tlb
->mas7_3
& MAS3_SW
) {
1388 _prot
|= PAGE_WRITE
;
1390 if (tlb
->mas7_3
& MAS3_SX
) {
1395 /* Check the address space and permissions */
1396 if (access_type
== ACCESS_CODE
) {
1397 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1398 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1403 if (_prot
& PAGE_EXEC
) {
1404 LOG_SWTLB("%s: good TLB!\n", __func__
);
1408 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, _prot
);
1411 if (msr_dr
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1412 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1417 if ((!rw
&& _prot
& PAGE_READ
) || (rw
&& (_prot
& PAGE_WRITE
))) {
1418 LOG_SWTLB("%s: found TLB!\n", __func__
);
1422 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, _prot
);
1429 static int mmubooke206_get_physical_address(CPUState
*env
, mmu_ctx_t
*ctx
,
1430 target_ulong address
, int rw
,
1434 target_phys_addr_t raddr
;
1438 raddr
= (target_phys_addr_t
)-1ULL;
1440 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1441 int ways
= booke206_tlb_ways(env
, i
);
1443 for (j
= 0; j
< ways
; j
++) {
1444 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1448 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1460 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1461 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1464 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1465 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1471 static const char *book3e_tsize_to_str
[32] = {
1472 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1473 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1474 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1478 static void mmubooke206_dump_one_tlb(FILE *f
, fprintf_function cpu_fprintf
,
1479 CPUState
*env
, int tlbn
, int offset
,
1482 ppcmas_tlb_t
*entry
;
1485 cpu_fprintf(f
, "\nTLB%d:\n", tlbn
);
1486 cpu_fprintf(f
, "Effective Physical Size TID TS SRWX URWX WIMGE U0123\n");
1488 entry
= &env
->tlb
.tlbm
[offset
];
1489 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1490 target_phys_addr_t ea
, pa
, size
;
1493 if (!(entry
->mas1
& MAS1_VALID
)) {
1497 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1498 size
= 1024ULL << tsize
;
1499 ea
= entry
->mas2
& ~(size
- 1);
1500 pa
= entry
->mas7_3
& ~(size
- 1);
1502 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1503 (uint64_t)ea
, (uint64_t)pa
,
1504 book3e_tsize_to_str
[tsize
],
1505 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1506 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1507 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1508 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1509 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1510 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1511 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1512 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1513 entry
->mas2
& MAS2_W
? 'W' : '-',
1514 entry
->mas2
& MAS2_I
? 'I' : '-',
1515 entry
->mas2
& MAS2_M
? 'M' : '-',
1516 entry
->mas2
& MAS2_G
? 'G' : '-',
1517 entry
->mas2
& MAS2_E
? 'E' : '-',
1518 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1519 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1520 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1521 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1525 static void mmubooke206_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1531 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1532 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1536 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1537 int size
= booke206_tlb_size(env
, i
);
1543 mmubooke206_dump_one_tlb(f
, cpu_fprintf
, env
, i
, offset
, size
);
1548 #if defined(TARGET_PPC64)
1549 static void mmubooks_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1553 uint64_t slbe
, slbv
;
1555 cpu_synchronize_state(env
);
1557 cpu_fprintf(f
, "SLB\tESID\t\t\tVSID\n");
1558 for (i
= 0; i
< env
->slb_nr
; i
++) {
1559 slbe
= env
->slb
[i
].esid
;
1560 slbv
= env
->slb
[i
].vsid
;
1561 if (slbe
== 0 && slbv
== 0) {
1564 cpu_fprintf(f
, "%d\t0x%016" PRIx64
"\t0x%016" PRIx64
"\n",
/* Dump the guest MMU state to 'f' for the MMU models that implement a
 * dumper (BookE 2.06 TLBs, 64-bit hash-MMU SLB); everything else is
 * reported as unimplemented. */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(f, cpu_fprintf, env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        mmubooks_dump_mmu(f, cpu_fprintf, env);
        break;
#endif
    default:
        cpu_fprintf(f, "%s: unimplemented\n", __func__);
    }
}
1587 static inline int check_physical(CPUState
*env
, mmu_ctx_t
*ctx
,
1588 target_ulong eaddr
, int rw
)
1593 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1595 switch (env
->mmu_model
) {
1596 case POWERPC_MMU_32B
:
1597 case POWERPC_MMU_601
:
1598 case POWERPC_MMU_SOFT_6xx
:
1599 case POWERPC_MMU_SOFT_74xx
:
1600 case POWERPC_MMU_SOFT_4xx
:
1601 case POWERPC_MMU_REAL
:
1602 case POWERPC_MMU_BOOKE
:
1603 ctx
->prot
|= PAGE_WRITE
;
1605 #if defined(TARGET_PPC64)
1606 case POWERPC_MMU_620
:
1607 case POWERPC_MMU_64B
:
1608 case POWERPC_MMU_2_06
:
1609 /* Real address are 60 bits long */
1610 ctx
->raddr
&= 0x0FFFFFFFFFFFFFFFULL
;
1611 ctx
->prot
|= PAGE_WRITE
;
1614 case POWERPC_MMU_SOFT_4xx_Z
:
1615 if (unlikely(msr_pe
!= 0)) {
1616 /* 403 family add some particular protections,
1617 * using PBL/PBU registers for accesses with no translation.
1620 /* Check PLB validity */
1621 (env
->pb
[0] < env
->pb
[1] &&
1622 /* and address in plb area */
1623 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1624 (env
->pb
[2] < env
->pb
[3] &&
1625 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1626 if (in_plb
^ msr_px
) {
1627 /* Access in protected area */
1629 /* Access is not allowed */
1633 /* Read-write access is allowed */
1634 ctx
->prot
|= PAGE_WRITE
;
1638 case POWERPC_MMU_MPC8xx
:
1640 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1642 case POWERPC_MMU_BOOKE206
:
1643 cpu_abort(env
, "BookE 2.06 MMU doesn't have physical real mode\n");
1646 cpu_abort(env
, "Unknown or invalid MMU model\n");
1653 int get_physical_address (CPUState
*env
, mmu_ctx_t
*ctx
, target_ulong eaddr
,
1654 int rw
, int access_type
)
1659 qemu_log("%s\n", __func__
);
1661 if ((access_type
== ACCESS_CODE
&& msr_ir
== 0) ||
1662 (access_type
!= ACCESS_CODE
&& msr_dr
== 0)) {
1663 if (env
->mmu_model
== POWERPC_MMU_BOOKE
) {
1664 /* The BookE MMU always performs address translation. The
1665 IS and DS bits only affect the address space. */
1666 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1668 } else if (env
->mmu_model
== POWERPC_MMU_BOOKE206
) {
1669 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1672 /* No address translation. */
1673 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1677 switch (env
->mmu_model
) {
1678 case POWERPC_MMU_32B
:
1679 case POWERPC_MMU_601
:
1680 case POWERPC_MMU_SOFT_6xx
:
1681 case POWERPC_MMU_SOFT_74xx
:
1682 /* Try to find a BAT */
1683 if (env
->nb_BATs
!= 0)
1684 ret
= get_bat(env
, ctx
, eaddr
, rw
, access_type
);
1685 #if defined(TARGET_PPC64)
1686 case POWERPC_MMU_620
:
1687 case POWERPC_MMU_64B
:
1688 case POWERPC_MMU_2_06
:
1691 /* We didn't match any BAT entry or don't have BATs */
1692 ret
= get_segment(env
, ctx
, eaddr
, rw
, access_type
);
1695 case POWERPC_MMU_SOFT_4xx
:
1696 case POWERPC_MMU_SOFT_4xx_Z
:
1697 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1700 case POWERPC_MMU_BOOKE
:
1701 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1704 case POWERPC_MMU_BOOKE206
:
1705 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1708 case POWERPC_MMU_MPC8xx
:
1710 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1712 case POWERPC_MMU_REAL
:
1713 cpu_abort(env
, "PowerPC in real mode do not do any translation\n");
1716 cpu_abort(env
, "Unknown or invalid MMU model\n");
1721 qemu_log("%s address " TARGET_FMT_lx
" => %d " TARGET_FMT_plx
"\n",
1722 __func__
, eaddr
, ret
, ctx
->raddr
);
/* Debugger (gdbstub) address translation: resolve an effective address
 * to a physical page using a read / ACCESS_INT lookup with no side
 * effects on exception state; returns -1 when no translation exists. */
target_phys_addr_t cpu_get_phys_page_debug (CPUState *env, target_ulong addr)
{
    mmu_ctx_t ctx;

    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0))
        return -1;

    /* Callers want the page address, not the byte address. */
    return ctx.raddr & TARGET_PAGE_MASK;
}
1738 static void booke206_update_mas_tlb_miss(CPUState
*env
, target_ulong address
,
1741 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1742 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1743 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1744 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1745 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1746 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1749 if (((rw
== 2) && msr_ir
) || ((rw
!= 2) && msr_dr
)) {
1750 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1751 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1754 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1755 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1757 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1758 case MAS4_TIDSELD_PID0
:
1759 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID
] << MAS1_TID_SHIFT
;
1761 case MAS4_TIDSELD_PID1
:
1762 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID1
] << MAS1_TID_SHIFT
;
1764 case MAS4_TIDSELD_PID2
:
1765 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID2
] << MAS1_TID_SHIFT
;
1769 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1771 /* next victim logic */
1772 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1774 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1775 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1778 /* Perform address translation */
1779 int cpu_ppc_handle_mmu_fault (CPUState
*env
, target_ulong address
, int rw
,
1789 access_type
= ACCESS_CODE
;
1792 access_type
= env
->access_type
;
1794 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1796 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1797 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1798 mmu_idx
, TARGET_PAGE_SIZE
);
1800 } else if (ret
< 0) {
1802 if (access_type
== ACCESS_CODE
) {
1805 /* No matches in page tables or TLB */
1806 switch (env
->mmu_model
) {
1807 case POWERPC_MMU_SOFT_6xx
:
1808 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1809 env
->error_code
= 1 << 18;
1810 env
->spr
[SPR_IMISS
] = address
;
1811 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1813 case POWERPC_MMU_SOFT_74xx
:
1814 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1816 case POWERPC_MMU_SOFT_4xx
:
1817 case POWERPC_MMU_SOFT_4xx_Z
:
1818 env
->exception_index
= POWERPC_EXCP_ITLB
;
1819 env
->error_code
= 0;
1820 env
->spr
[SPR_40x_DEAR
] = address
;
1821 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1823 case POWERPC_MMU_32B
:
1824 case POWERPC_MMU_601
:
1825 #if defined(TARGET_PPC64)
1826 case POWERPC_MMU_620
:
1827 case POWERPC_MMU_64B
:
1828 case POWERPC_MMU_2_06
:
1830 env
->exception_index
= POWERPC_EXCP_ISI
;
1831 env
->error_code
= 0x40000000;
1833 case POWERPC_MMU_BOOKE206
:
1834 booke206_update_mas_tlb_miss(env
, address
, rw
);
1836 case POWERPC_MMU_BOOKE
:
1837 env
->exception_index
= POWERPC_EXCP_ITLB
;
1838 env
->error_code
= 0;
1839 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1841 case POWERPC_MMU_MPC8xx
:
1843 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1845 case POWERPC_MMU_REAL
:
1846 cpu_abort(env
, "PowerPC in real mode should never raise "
1847 "any MMU exceptions\n");
1850 cpu_abort(env
, "Unknown or invalid MMU model\n");
1855 /* Access rights violation */
1856 env
->exception_index
= POWERPC_EXCP_ISI
;
1857 env
->error_code
= 0x08000000;
1860 /* No execute protection violation */
1861 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1862 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1863 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1865 env
->exception_index
= POWERPC_EXCP_ISI
;
1866 env
->error_code
= 0x10000000;
1869 /* Direct store exception */
1870 /* No code fetch is allowed in direct-store areas */
1871 env
->exception_index
= POWERPC_EXCP_ISI
;
1872 env
->error_code
= 0x10000000;
1874 #if defined(TARGET_PPC64)
1876 /* No match in segment table */
1877 if (env
->mmu_model
== POWERPC_MMU_620
) {
1878 env
->exception_index
= POWERPC_EXCP_ISI
;
1879 /* XXX: this might be incorrect */
1880 env
->error_code
= 0x40000000;
1882 env
->exception_index
= POWERPC_EXCP_ISEG
;
1883 env
->error_code
= 0;
1891 /* No matches in page tables or TLB */
1892 switch (env
->mmu_model
) {
1893 case POWERPC_MMU_SOFT_6xx
:
1895 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1896 env
->error_code
= 1 << 16;
1898 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1899 env
->error_code
= 0;
1901 env
->spr
[SPR_DMISS
] = address
;
1902 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1904 env
->error_code
|= ctx
.key
<< 19;
1905 env
->spr
[SPR_HASH1
] = env
->htab_base
+
1906 get_pteg_offset(env
, ctx
.hash
[0], HASH_PTE_SIZE_32
);
1907 env
->spr
[SPR_HASH2
] = env
->htab_base
+
1908 get_pteg_offset(env
, ctx
.hash
[1], HASH_PTE_SIZE_32
);
1910 case POWERPC_MMU_SOFT_74xx
:
1912 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1914 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1917 /* Implement LRU algorithm */
1918 env
->error_code
= ctx
.key
<< 19;
1919 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1920 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1921 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1923 case POWERPC_MMU_SOFT_4xx
:
1924 case POWERPC_MMU_SOFT_4xx_Z
:
1925 env
->exception_index
= POWERPC_EXCP_DTLB
;
1926 env
->error_code
= 0;
1927 env
->spr
[SPR_40x_DEAR
] = address
;
1929 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1931 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1933 case POWERPC_MMU_32B
:
1934 case POWERPC_MMU_601
:
1935 #if defined(TARGET_PPC64)
1936 case POWERPC_MMU_620
:
1937 case POWERPC_MMU_64B
:
1938 case POWERPC_MMU_2_06
:
1940 env
->exception_index
= POWERPC_EXCP_DSI
;
1941 env
->error_code
= 0;
1942 env
->spr
[SPR_DAR
] = address
;
1944 env
->spr
[SPR_DSISR
] = 0x42000000;
1946 env
->spr
[SPR_DSISR
] = 0x40000000;
1948 case POWERPC_MMU_MPC8xx
:
1950 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1952 case POWERPC_MMU_BOOKE206
:
1953 booke206_update_mas_tlb_miss(env
, address
, rw
);
1955 case POWERPC_MMU_BOOKE
:
1956 env
->exception_index
= POWERPC_EXCP_DTLB
;
1957 env
->error_code
= 0;
1958 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1959 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
1961 case POWERPC_MMU_REAL
:
1962 cpu_abort(env
, "PowerPC in real mode should never raise "
1963 "any MMU exceptions\n");
1966 cpu_abort(env
, "Unknown or invalid MMU model\n");
1971 /* Access rights violation */
1972 env
->exception_index
= POWERPC_EXCP_DSI
;
1973 env
->error_code
= 0;
1974 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
1975 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
1976 env
->spr
[SPR_40x_DEAR
] = address
;
1978 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
1980 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1981 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1982 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1983 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
1985 env
->spr
[SPR_DAR
] = address
;
1987 env
->spr
[SPR_DSISR
] = 0x0A000000;
1989 env
->spr
[SPR_DSISR
] = 0x08000000;
1994 /* Direct store exception */
1995 switch (access_type
) {
1997 /* Floating point load/store */
1998 env
->exception_index
= POWERPC_EXCP_ALIGN
;
1999 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
2000 env
->spr
[SPR_DAR
] = address
;
2003 /* lwarx, ldarx or stwcx. */
2004 env
->exception_index
= POWERPC_EXCP_DSI
;
2005 env
->error_code
= 0;
2006 env
->spr
[SPR_DAR
] = address
;
2008 env
->spr
[SPR_DSISR
] = 0x06000000;
2010 env
->spr
[SPR_DSISR
] = 0x04000000;
2013 /* eciwx or ecowx */
2014 env
->exception_index
= POWERPC_EXCP_DSI
;
2015 env
->error_code
= 0;
2016 env
->spr
[SPR_DAR
] = address
;
2018 env
->spr
[SPR_DSISR
] = 0x06100000;
2020 env
->spr
[SPR_DSISR
] = 0x04100000;
2023 printf("DSI: invalid exception (%d)\n", ret
);
2024 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
2026 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
2027 env
->spr
[SPR_DAR
] = address
;
2031 #if defined(TARGET_PPC64)
2033 /* No match in segment table */
2034 if (env
->mmu_model
== POWERPC_MMU_620
) {
2035 env
->exception_index
= POWERPC_EXCP_DSI
;
2036 env
->error_code
= 0;
2037 env
->spr
[SPR_DAR
] = address
;
2038 /* XXX: this might be incorrect */
2040 env
->spr
[SPR_DSISR
] = 0x42000000;
2042 env
->spr
[SPR_DSISR
] = 0x40000000;
2044 env
->exception_index
= POWERPC_EXCP_DSEG
;
2045 env
->error_code
= 0;
2046 env
->spr
[SPR_DAR
] = address
;
2053 printf("%s: set exception to %d %02x\n", __func__
,
2054 env
->exception
, env
->error_code
);
2062 /*****************************************************************************/
2063 /* BATs management */
2064 #if !defined(FLUSH_ALL_TLBS)
/* Flush from the QEMU TLB every page covered by a (now stale) BAT
 * mapping.  BATu supplies the effective base (BEPI field in the upper
 * bits); mask is the block-length mask derived from the BAT's BL field.
 * The covered range [base, end) is walked one target page at a time. */
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    target_ulong base, end, page;

    /* Minimum BAT block is 128KB, hence the 0x0001FFFF / 0x00020000
       constants below. */
    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
             TARGET_FMT_lx ")\n", base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE)
        tlb_flush_page(env, page);
    LOG_BATS("Flush done\n");
}
2080 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
2083 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
2084 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
/* Store to the upper word of instruction BAT 'nr'.
 * On change, the QEMU TLB pages covered by the old and the new mapping
 * are invalidated, and address bits below the block-length mask are
 * masked out of the stored BEPI/BRPN so the pair stays consistent. */
void ppc_store_ibatu (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        /* BL field shifted into an effective-address mask. */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        /* Drop translations of the mapping being replaced. */
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        /* Keep the lower word's BRPN consistent with the new mask. */
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
/* Store to the lower word of instruction BAT 'nr'.
 * NOTE(review): unlike the upper-word store, no TLB invalidation is
 * performed here — confirm this matches the intended flush policy. */
void ppc_store_ibatl (CPUPPCState *env, int nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}
/* Store to the upper word of data BAT 'nr'.
 * Mirror of ppc_store_ibatu for the DBAT array: on change, invalidate
 * the pages covered by the old and new mapping and mask BEPI/BRPN bits
 * below the block-length mask out of the stored values. */
void ppc_store_dbatu (CPUPPCState *env, int nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        /* BL field shifted into an effective-address mask. */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        /* Drop translations of the mapping being replaced. */
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        /* Keep the lower word's BRPN consistent with the new mask. */
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
/* Store to the lower word of data BAT 'nr'.
 * NOTE(review): as with ppc_store_ibatl, no TLB invalidation happens on
 * this path — confirm that is intentional. */
void ppc_store_dbatl (CPUPPCState *env, int nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}
2151 void ppc_store_ibatu_601 (CPUPPCState
*env
, int nr
, target_ulong value
)
2154 #if defined(FLUSH_ALL_TLBS)
2158 dump_store_bat(env
, 'I', 0, nr
, value
);
2159 if (env
->IBAT
[0][nr
] != value
) {
2160 #if defined(FLUSH_ALL_TLBS)
2163 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2164 if (env
->IBAT
[1][nr
] & 0x40) {
2165 /* Invalidate BAT only if it is valid */
2166 #if !defined(FLUSH_ALL_TLBS)
2167 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2172 /* When storing valid upper BAT, mask BEPI and BRPN
2173 * and invalidate all TLBs covered by this BAT
2175 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2176 (value
& ~0x0001FFFFUL
& ~mask
);
2177 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
2178 if (env
->IBAT
[1][nr
] & 0x40) {
2179 #if !defined(FLUSH_ALL_TLBS)
2180 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2185 #if defined(FLUSH_ALL_TLBS)
2192 void ppc_store_ibatl_601 (CPUPPCState
*env
, int nr
, target_ulong value
)
2195 #if defined(FLUSH_ALL_TLBS)
2199 dump_store_bat(env
, 'I', 1, nr
, value
);
2200 if (env
->IBAT
[1][nr
] != value
) {
2201 #if defined(FLUSH_ALL_TLBS)
2204 if (env
->IBAT
[1][nr
] & 0x40) {
2205 #if !defined(FLUSH_ALL_TLBS)
2206 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2207 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2213 #if !defined(FLUSH_ALL_TLBS)
2214 mask
= (value
<< 17) & 0x0FFE0000UL
;
2215 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2220 env
->IBAT
[1][nr
] = value
;
2221 env
->DBAT
[1][nr
] = value
;
2222 #if defined(FLUSH_ALL_TLBS)
2229 /*****************************************************************************/
2230 /* TLB management */
/* Invalidate every TLB entry, dispatching on the emulated MMU model:
 * software-managed TLBs get their model-specific flush, BookE 2.06 goes
 * through booke206_flush_tlb, hash-MMU models flush the whole QEMU TLB,
 * and models with no TLB (or not implemented) abort. */
void ppc_tlb_invalidate_all (CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env, 1);
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
#endif /* defined(TARGET_PPC64) */
        tlb_flush(env, 1);
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
}
2271 void ppc_tlb_invalidate_one (CPUPPCState
*env
, target_ulong addr
)
2273 #if !defined(FLUSH_ALL_TLBS)
2274 addr
&= TARGET_PAGE_MASK
;
2275 switch (env
->mmu_model
) {
2276 case POWERPC_MMU_SOFT_6xx
:
2277 case POWERPC_MMU_SOFT_74xx
:
2278 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2279 if (env
->id_tlbs
== 1)
2280 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2282 case POWERPC_MMU_SOFT_4xx
:
2283 case POWERPC_MMU_SOFT_4xx_Z
:
2284 ppc4xx_tlb_invalidate_virt(env
, addr
, env
->spr
[SPR_40x_PID
]);
2286 case POWERPC_MMU_REAL
:
2287 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2289 case POWERPC_MMU_MPC8xx
:
2291 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2293 case POWERPC_MMU_BOOKE
:
2295 cpu_abort(env
, "BookE MMU model is not implemented\n");
2297 case POWERPC_MMU_BOOKE206
:
2299 cpu_abort(env
, "BookE 2.06 MMU model is not implemented\n");
2301 case POWERPC_MMU_32B
:
2302 case POWERPC_MMU_601
:
2303 /* tlbie invalidate TLBs for all segments */
2304 addr
&= ~((target_ulong
)-1ULL << 28);
2305 /* XXX: this case should be optimized,
2306 * giving a mask to tlb_flush_page
2308 tlb_flush_page(env
, addr
| (0x0 << 28));
2309 tlb_flush_page(env
, addr
| (0x1 << 28));
2310 tlb_flush_page(env
, addr
| (0x2 << 28));
2311 tlb_flush_page(env
, addr
| (0x3 << 28));
2312 tlb_flush_page(env
, addr
| (0x4 << 28));
2313 tlb_flush_page(env
, addr
| (0x5 << 28));
2314 tlb_flush_page(env
, addr
| (0x6 << 28));
2315 tlb_flush_page(env
, addr
| (0x7 << 28));
2316 tlb_flush_page(env
, addr
| (0x8 << 28));
2317 tlb_flush_page(env
, addr
| (0x9 << 28));
2318 tlb_flush_page(env
, addr
| (0xA << 28));
2319 tlb_flush_page(env
, addr
| (0xB << 28));
2320 tlb_flush_page(env
, addr
| (0xC << 28));
2321 tlb_flush_page(env
, addr
| (0xD << 28));
2322 tlb_flush_page(env
, addr
| (0xE << 28));
2323 tlb_flush_page(env
, addr
| (0xF << 28));
2325 #if defined(TARGET_PPC64)
2326 case POWERPC_MMU_620
:
2327 case POWERPC_MMU_64B
:
2328 case POWERPC_MMU_2_06
:
2329 /* tlbie invalidate TLBs for all segments */
2330 /* XXX: given the fact that there are too many segments to invalidate,
2331 * and we still don't have a tlb_flush_mask(env, n, mask) in Qemu,
2332 * we just invalidate all TLBs
2336 #endif /* defined(TARGET_PPC64) */
2339 cpu_abort(env
, "Unknown MMU model\n");
2343 ppc_tlb_invalidate_all(env
);
2347 /*****************************************************************************/
2348 /* Special registers manipulation */
2349 #if defined(TARGET_PPC64)
2350 void ppc_store_asr (CPUPPCState
*env
, target_ulong value
)
2352 if (env
->asr
!= value
) {
/* Store to SDR1 (hashed page table origin/size) and recompute the
 * cached htab_base/htab_mask.  On the 64-bit hash MMU the size is an
 * encoded HTABSIZE field; on 32-bit it is the HTABMASK bit mask.
 * Any change flushes the TLB, since all translations may now differ. */
void ppc_store_sdr1 (CPUPPCState *env, target_ulong value)
{
    LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
    if (env->spr[SPR_SDR1] != value) {
        env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
        if (env->mmu_model & POWERPC_MMU_64) {
            target_ulong htabsize = value & SDR_64_HTABSIZE;

            if (htabsize > 28) {
                /* 28 is the architectural maximum; clamp and warn. */
                fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                        " stored in SDR1\n", htabsize);
                htabsize = 28;
            }
            /* Minimum table is 2^18 bytes, scaled up by HTABSIZE. */
            env->htab_mask = (1ULL << (htabsize + 18)) - 1;
            env->htab_base = value & SDR_64_HTABORG;
        } else
#endif /* defined(TARGET_PPC64) */
        {
            /* FIXME: Should check for valid HTABMASK values */
            env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
            env->htab_base = value & SDR_32_HTABORG;
        }
        tlb_flush(env, 1);
    }
}
2386 #if defined(TARGET_PPC64)
2387 target_ulong
ppc_load_sr (CPUPPCState
*env
, int slb_nr
)
2394 void ppc_store_sr (CPUPPCState
*env
, int srnum
, target_ulong value
)
2396 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2397 srnum
, value
, env
->sr
[srnum
]);
2398 #if defined(TARGET_PPC64)
2399 if (env
->mmu_model
& POWERPC_MMU_64
) {
2400 uint64_t rb
= 0, rs
= 0;
2403 rb
|= ((uint32_t)srnum
& 0xf) << 28;
2404 /* Set the valid bit */
2407 rb
|= (uint32_t)srnum
;
2410 rs
|= (value
& 0xfffffff) << 12;
2412 rs
|= ((value
>> 27) & 0xf) << 8;
2414 ppc_store_slb(env
, rb
, rs
);
2417 if (env
->sr
[srnum
] != value
) {
2418 env
->sr
[srnum
] = value
;
2419 /* Invalidating 256MB of virtual memory in 4kB pages is way longer than
2420 flusing the whole TLB. */
2421 #if !defined(FLUSH_ALL_TLBS) && 0
2423 target_ulong page
, end
;
2424 /* Invalidate 256 MB of virtual memory */
2425 page
= (16 << 20) * srnum
;
2426 end
= page
+ (16 << 20);
2427 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
)
2428 tlb_flush_page(env
, page
);
2435 #endif /* !defined (CONFIG_USER_ONLY) */
2437 /* GDBstub can read and write MSR... */
/* Store a new MSR value (used e.g. by the gdbstub).  Delegates to the
 * common helper with a third argument of 0 — presumably "do not alter
 * the hypervisor state"; confirm against hreg_store_msr. */
void ppc_store_msr (CPUPPCState *env, target_ulong value)
{
    hreg_store_msr(env, value, 0);
}
2443 /*****************************************************************************/
2444 /* Exception processing */
2445 #if defined (CONFIG_USER_ONLY)
/* User-mode emulation stub: exceptions are delivered to the host
 * process, so just clear the pending exception state. */
void do_interrupt (CPUState *env)
{
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
/* User-mode emulation stub: hardware interrupts do not occur, so just
 * clear the pending exception state. */
void ppc_hw_interrupt (CPUState *env)
{
    env->exception_index = POWERPC_EXCP_NONE;
    env->error_code = 0;
}
2457 #else /* defined (CONFIG_USER_ONLY) */
/* When CPU_LOG_INT logging is enabled, dump the registers relevant to a
 * system call (r0 and r3..r6) plus the current nip. */
static inline void dump_syscall(CPUState *env)
{
    qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
                  " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
                  " nip=" TARGET_FMT_lx "\n",
                  ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
                  ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
                  ppc_dump_gpr(env, 6), env->nip);
}
2468 /* Note that this function should be greatly optimized
2469 * when called with a constant excp, from ppc_hw_interrupt
2471 static inline void powerpc_excp(CPUState
*env
, int excp_model
, int excp
)
2473 target_ulong msr
, new_msr
, vector
;
2474 int srr0
, srr1
, asrr0
, asrr1
;
2475 int lpes0
, lpes1
, lev
;
2478 /* XXX: find a suitable condition to enable the hypervisor mode */
2479 lpes0
= (env
->spr
[SPR_LPCR
] >> 1) & 1;
2480 lpes1
= (env
->spr
[SPR_LPCR
] >> 2) & 1;
2482 /* Those values ensure we won't enter the hypervisor mode */
2487 qemu_log_mask(CPU_LOG_INT
, "Raise exception at " TARGET_FMT_lx
2488 " => %08x (%02x)\n", env
->nip
, excp
, env
->error_code
);
2490 /* new srr1 value excluding must-be-zero bits */
2491 msr
= env
->msr
& ~0x783f0000ULL
;
2493 /* new interrupt handler msr */
2494 new_msr
= env
->msr
& ((target_ulong
)1 << MSR_ME
);
2496 /* target registers */
2503 case POWERPC_EXCP_NONE
:
2504 /* Should never happen */
2506 case POWERPC_EXCP_CRITICAL
: /* Critical input */
2507 switch (excp_model
) {
2508 case POWERPC_EXCP_40x
:
2509 srr0
= SPR_40x_SRR2
;
2510 srr1
= SPR_40x_SRR3
;
2512 case POWERPC_EXCP_BOOKE
:
2513 srr0
= SPR_BOOKE_CSRR0
;
2514 srr1
= SPR_BOOKE_CSRR1
;
2516 case POWERPC_EXCP_G2
:
2522 case POWERPC_EXCP_MCHECK
: /* Machine check exception */
2524 /* Machine check exception is not enabled.
2525 * Enter checkstop state.
2527 if (qemu_log_enabled()) {
2528 qemu_log("Machine check while not allowed. "
2529 "Entering checkstop state\n");
2531 fprintf(stderr
, "Machine check while not allowed. "
2532 "Entering checkstop state\n");
2535 env
->interrupt_request
|= CPU_INTERRUPT_EXITTB
;
2538 /* XXX: find a suitable condition to enable the hypervisor mode */
2539 new_msr
|= (target_ulong
)MSR_HVB
;
2542 /* machine check exceptions don't have ME set */
2543 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
2545 /* XXX: should also have something loaded in DAR / DSISR */
2546 switch (excp_model
) {
2547 case POWERPC_EXCP_40x
:
2548 srr0
= SPR_40x_SRR2
;
2549 srr1
= SPR_40x_SRR3
;
2551 case POWERPC_EXCP_BOOKE
:
2552 srr0
= SPR_BOOKE_MCSRR0
;
2553 srr1
= SPR_BOOKE_MCSRR1
;
2554 asrr0
= SPR_BOOKE_CSRR0
;
2555 asrr1
= SPR_BOOKE_CSRR1
;
2561 case POWERPC_EXCP_DSI
: /* Data storage exception */
2562 LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx
" DAR=" TARGET_FMT_lx
2563 "\n", env
->spr
[SPR_DSISR
], env
->spr
[SPR_DAR
]);
2565 new_msr
|= (target_ulong
)MSR_HVB
;
2567 case POWERPC_EXCP_ISI
: /* Instruction storage exception */
2568 LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx
", nip=" TARGET_FMT_lx
2569 "\n", msr
, env
->nip
);
2571 new_msr
|= (target_ulong
)MSR_HVB
;
2572 msr
|= env
->error_code
;
2574 case POWERPC_EXCP_EXTERNAL
: /* External input */
2576 new_msr
|= (target_ulong
)MSR_HVB
;
2578 case POWERPC_EXCP_ALIGN
: /* Alignment exception */
2580 new_msr
|= (target_ulong
)MSR_HVB
;
2581 /* XXX: this is false */
2582 /* Get rS/rD and rA from faulting opcode */
2583 env
->spr
[SPR_DSISR
] |= (ldl_code((env
->nip
- 4)) & 0x03FF0000) >> 16;
2585 case POWERPC_EXCP_PROGRAM
: /* Program exception */
2586 switch (env
->error_code
& ~0xF) {
2587 case POWERPC_EXCP_FP
:
2588 if ((msr_fe0
== 0 && msr_fe1
== 0) || msr_fp
== 0) {
2589 LOG_EXCP("Ignore floating point exception\n");
2590 env
->exception_index
= POWERPC_EXCP_NONE
;
2591 env
->error_code
= 0;
2595 new_msr
|= (target_ulong
)MSR_HVB
;
2597 if (msr_fe0
== msr_fe1
)
2601 case POWERPC_EXCP_INVAL
:
2602 LOG_EXCP("Invalid instruction at " TARGET_FMT_lx
"\n", env
->nip
);
2604 new_msr
|= (target_ulong
)MSR_HVB
;
2606 env
->spr
[SPR_BOOKE_ESR
] = ESR_PIL
;
2608 case POWERPC_EXCP_PRIV
:
2610 new_msr
|= (target_ulong
)MSR_HVB
;
2612 env
->spr
[SPR_BOOKE_ESR
] = ESR_PPR
;
2614 case POWERPC_EXCP_TRAP
:
2616 new_msr
|= (target_ulong
)MSR_HVB
;
2618 env
->spr
[SPR_BOOKE_ESR
] = ESR_PTR
;
2621 /* Should never occur */
2622 cpu_abort(env
, "Invalid program exception %d. Aborting\n",
2627 case POWERPC_EXCP_FPU
: /* Floating-point unavailable exception */
2629 new_msr
|= (target_ulong
)MSR_HVB
;
2631 case POWERPC_EXCP_SYSCALL
: /* System call exception */
2633 lev
= env
->error_code
;
2634 if ((lev
== 1) && cpu_ppc_hypercall
) {
2635 cpu_ppc_hypercall(env
);
2638 if (lev
== 1 || (lpes0
== 0 && lpes1
== 0))
2639 new_msr
|= (target_ulong
)MSR_HVB
;
2641 case POWERPC_EXCP_APU
: /* Auxiliary processor unavailable */
2643 case POWERPC_EXCP_DECR
: /* Decrementer exception */
2645 new_msr
|= (target_ulong
)MSR_HVB
;
2647 case POWERPC_EXCP_FIT
: /* Fixed-interval timer interrupt */
2649 LOG_EXCP("FIT exception\n");
2651 case POWERPC_EXCP_WDT
: /* Watchdog timer interrupt */
2652 LOG_EXCP("WDT exception\n");
2653 switch (excp_model
) {
2654 case POWERPC_EXCP_BOOKE
:
2655 srr0
= SPR_BOOKE_CSRR0
;
2656 srr1
= SPR_BOOKE_CSRR1
;
2662 case POWERPC_EXCP_DTLB
: /* Data TLB error */
2664 case POWERPC_EXCP_ITLB
: /* Instruction TLB error */
2666 case POWERPC_EXCP_DEBUG
: /* Debug interrupt */
2667 switch (excp_model
) {
2668 case POWERPC_EXCP_BOOKE
:
2669 srr0
= SPR_BOOKE_DSRR0
;
2670 srr1
= SPR_BOOKE_DSRR1
;
2671 asrr0
= SPR_BOOKE_CSRR0
;
2672 asrr1
= SPR_BOOKE_CSRR1
;
2678 cpu_abort(env
, "Debug exception is not implemented yet !\n");
2680 case POWERPC_EXCP_SPEU
: /* SPE/embedded floating-point unavailable */
2681 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
2683 case POWERPC_EXCP_EFPDI
: /* Embedded floating-point data interrupt */
2685 cpu_abort(env
, "Embedded floating point data exception "
2686 "is not implemented yet !\n");
2687 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
2689 case POWERPC_EXCP_EFPRI
: /* Embedded floating-point round interrupt */
2691 cpu_abort(env
, "Embedded floating point round exception "
2692 "is not implemented yet !\n");
2693 env
->spr
[SPR_BOOKE_ESR
] = ESR_SPV
;
2695 case POWERPC_EXCP_EPERFM
: /* Embedded performance monitor interrupt */
2698 "Performance counter exception is not implemented yet !\n");
2700 case POWERPC_EXCP_DOORI
: /* Embedded doorbell interrupt */
2702 case POWERPC_EXCP_DOORCI
: /* Embedded doorbell critical interrupt */
2703 srr0
= SPR_BOOKE_CSRR0
;
2704 srr1
= SPR_BOOKE_CSRR1
;
2706 case POWERPC_EXCP_RESET
: /* System reset exception */
2708 /* indicate that we resumed from power save mode */
2711 new_msr
&= ~((target_ulong
)1 << MSR_ME
);
2715 /* XXX: find a suitable condition to enable the hypervisor mode */
2716 new_msr
|= (target_ulong
)MSR_HVB
;
2719 case POWERPC_EXCP_DSEG
: /* Data segment exception */
2721 new_msr
|= (target_ulong
)MSR_HVB
;
2723 case POWERPC_EXCP_ISEG
: /* Instruction segment exception */
2725 new_msr
|= (target_ulong
)MSR_HVB
;
2727 case POWERPC_EXCP_HDECR
: /* Hypervisor decrementer exception */
2730 new_msr
|= (target_ulong
)MSR_HVB
;
2731 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
2733 case POWERPC_EXCP_TRACE
: /* Trace exception */
2735 new_msr
|= (target_ulong
)MSR_HVB
;
2737 case POWERPC_EXCP_HDSI
: /* Hypervisor data storage exception */
2740 new_msr
|= (target_ulong
)MSR_HVB
;
2741 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
2743 case POWERPC_EXCP_HISI
: /* Hypervisor instruction storage exception */
2746 new_msr
|= (target_ulong
)MSR_HVB
;
2747 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
2749 case POWERPC_EXCP_HDSEG
: /* Hypervisor data segment exception */
2752 new_msr
|= (target_ulong
)MSR_HVB
;
2753 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
2755 case POWERPC_EXCP_HISEG
: /* Hypervisor instruction segment exception */
2758 new_msr
|= (target_ulong
)MSR_HVB
;
2759 new_msr
|= env
->msr
& ((target_ulong
)1 << MSR_RI
);
2761 case POWERPC_EXCP_VPU
: /* Vector unavailable exception */
2763 new_msr
|= (target_ulong
)MSR_HVB
;
2765 case POWERPC_EXCP_PIT
: /* Programmable interval timer interrupt */
2766 LOG_EXCP("PIT exception\n");
2768 case POWERPC_EXCP_IO
: /* IO error exception */
2770 cpu_abort(env
, "601 IO error exception is not implemented yet !\n");
2772 case POWERPC_EXCP_RUNM
: /* Run mode exception */
2774 cpu_abort(env
, "601 run mode exception is not implemented yet !\n");
2776 case POWERPC_EXCP_EMUL
: /* Emulation trap exception */
2778 cpu_abort(env
, "602 emulation trap exception "
2779 "is not implemented yet !\n");
2781 case POWERPC_EXCP_IFTLB
: /* Instruction fetch TLB error */
2782 if (lpes1
== 0) /* XXX: check this */
2783 new_msr
|= (target_ulong
)MSR_HVB
;
2784 switch (excp_model
) {
2785 case POWERPC_EXCP_602
:
2786 case POWERPC_EXCP_603
:
2787 case POWERPC_EXCP_603E
:
2788 case POWERPC_EXCP_G2
:
2790 case POWERPC_EXCP_7x5
:
2792 case POWERPC_EXCP_74xx
:
2795 cpu_abort(env
, "Invalid instruction TLB miss exception\n");
2799 case POWERPC_EXCP_DLTLB
: /* Data load TLB miss */
2800 if (lpes1
== 0) /* XXX: check this */
2801 new_msr
|= (target_ulong
)MSR_HVB
;
2802 switch (excp_model
) {
2803 case POWERPC_EXCP_602
:
2804 case POWERPC_EXCP_603
:
2805 case POWERPC_EXCP_603E
:
2806 case POWERPC_EXCP_G2
:
2808 case POWERPC_EXCP_7x5
:
2810 case POWERPC_EXCP_74xx
:
2813 cpu_abort(env
, "Invalid data load TLB miss exception\n");
2817 case POWERPC_EXCP_DSTLB
: /* Data store TLB miss */
2818 if (lpes1
== 0) /* XXX: check this */
2819 new_msr
|= (target_ulong
)MSR_HVB
;
2820 switch (excp_model
) {
2821 case POWERPC_EXCP_602
:
2822 case POWERPC_EXCP_603
:
2823 case POWERPC_EXCP_603E
:
2824 case POWERPC_EXCP_G2
:
2826 /* Swap temporary saved registers with GPRs */
2827 if (!(new_msr
& ((target_ulong
)1 << MSR_TGPR
))) {
2828 new_msr
|= (target_ulong
)1 << MSR_TGPR
;
2829 hreg_swap_gpr_tgpr(env
);
2832 case POWERPC_EXCP_7x5
:
2834 #if defined (DEBUG_SOFTWARE_TLB)
2835 if (qemu_log_enabled()) {
2837 target_ulong
*miss
, *cmp
;
2839 if (excp
== POWERPC_EXCP_IFTLB
) {
2842 miss
= &env
->spr
[SPR_IMISS
];
2843 cmp
= &env
->spr
[SPR_ICMP
];
2845 if (excp
== POWERPC_EXCP_DLTLB
)
2850 miss
= &env
->spr
[SPR_DMISS
];
2851 cmp
= &env
->spr
[SPR_DCMP
];
2853 qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
2854 TARGET_FMT_lx
" H1 " TARGET_FMT_lx
" H2 "
2855 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
2856 env
->spr
[SPR_HASH1
], env
->spr
[SPR_HASH2
],
2860 msr
|= env
->crf
[0] << 28;
2861 msr
|= env
->error_code
; /* key, D/I, S/L bits */
2862 /* Set way using a LRU mechanism */
2863 msr
|= ((env
->last_way
+ 1) & (env
->nb_ways
- 1)) << 17;
2865 case POWERPC_EXCP_74xx
:
2867 #if defined (DEBUG_SOFTWARE_TLB)
2868 if (qemu_log_enabled()) {
2870 target_ulong
*miss
, *cmp
;
2872 if (excp
== POWERPC_EXCP_IFTLB
) {
2875 miss
= &env
->spr
[SPR_TLBMISS
];
2876 cmp
= &env
->spr
[SPR_PTEHI
];
2878 if (excp
== POWERPC_EXCP_DLTLB
)
2883 miss
= &env
->spr
[SPR_TLBMISS
];
2884 cmp
= &env
->spr
[SPR_PTEHI
];
2886 qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx
" %cC "
2887 TARGET_FMT_lx
" %08x\n", es
, en
, *miss
, en
, *cmp
,
2891 msr
|= env
->error_code
; /* key bit */
2894 cpu_abort(env
, "Invalid data store TLB miss exception\n");
2898 case POWERPC_EXCP_FPA
: /* Floating-point assist exception */
2900 cpu_abort(env
, "Floating point assist exception "
2901 "is not implemented yet !\n");
2903 case POWERPC_EXCP_DABR
: /* Data address breakpoint */
2905 cpu_abort(env
, "DABR exception is not implemented yet !\n");
2907 case POWERPC_EXCP_IABR
: /* Instruction address breakpoint */
2909 cpu_abort(env
, "IABR exception is not implemented yet !\n");
2911 case POWERPC_EXCP_SMI
: /* System management interrupt */
2913 cpu_abort(env
, "SMI exception is not implemented yet !\n");
2915 case POWERPC_EXCP_THERM
: /* Thermal interrupt */
2917 cpu_abort(env
, "Thermal management exception "
2918 "is not implemented yet !\n");
2920 case POWERPC_EXCP_PERFM
: /* Embedded performance monitor interrupt */
2922 new_msr
|= (target_ulong
)MSR_HVB
;
2925 "Performance counter exception is not implemented yet !\n");
2927 case POWERPC_EXCP_VPUA
: /* Vector assist exception */
2929 cpu_abort(env
, "VPU assist exception is not implemented yet !\n");
2931 case POWERPC_EXCP_SOFTP
: /* Soft patch exception */
2934 "970 soft-patch exception is not implemented yet !\n");
2936 case POWERPC_EXCP_MAINT
: /* Maintenance exception */
2939 "970 maintenance exception is not implemented yet !\n");
2941 case POWERPC_EXCP_MEXTBR
: /* Maskable external breakpoint */
2943 cpu_abort(env
, "Maskable external exception "
2944 "is not implemented yet !\n");
2946 case POWERPC_EXCP_NMEXTBR
: /* Non maskable external breakpoint */
2948 cpu_abort(env
, "Non maskable external exception "
2949 "is not implemented yet !\n");
2953 cpu_abort(env
, "Invalid PowerPC exception %d. Aborting\n", excp
);
2956 /* save current instruction location */
2957 env
->spr
[srr0
] = env
->nip
- 4;
2960 /* save next instruction location */
2961 env
->spr
[srr0
] = env
->nip
;
2965 env
->spr
[srr1
] = msr
;
2966 /* If any alternate SRR register are defined, duplicate saved values */
2968 env
->spr
[asrr0
] = env
->spr
[srr0
];
2970 env
->spr
[asrr1
] = env
->spr
[srr1
];
2971 /* If we disactivated any translation, flush TLBs */
2972 if (new_msr
& ((1 << MSR_IR
) | (1 << MSR_DR
)))
2976 new_msr
|= (target_ulong
)1 << MSR_LE
;
2979 /* Jump to handler */
2980 vector
= env
->excp_vectors
[excp
];
2981 if (vector
== (target_ulong
)-1ULL) {
2982 cpu_abort(env
, "Raised an exception without defined vector %d\n",
2985 vector
|= env
->excp_prefix
;
2986 #if defined(TARGET_PPC64)
2987 if (excp_model
== POWERPC_EXCP_BOOKE
) {
2989 vector
= (uint32_t)vector
;
2991 new_msr
|= (target_ulong
)1 << MSR_CM
;
2994 if (!msr_isf
&& !(env
->mmu_model
& POWERPC_MMU_64
)) {
2995 vector
= (uint32_t)vector
;
2997 new_msr
|= (target_ulong
)1 << MSR_SF
;
3001 /* XXX: we don't use hreg_store_msr here as already have treated
3002 * any special case that could occur. Just store MSR and update hflags
3004 env
->msr
= new_msr
& env
->msr_mask
;
3005 hreg_compute_hflags(env
);
3007 /* Reset exception state */
3008 env
->exception_index
= POWERPC_EXCP_NONE
;
3009 env
->error_code
= 0;
3011 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
3012 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
3013 /* XXX: The BookE changes address space when switching modes,
3014 we should probably implement that as different MMU indexes,
3015 but for the moment we do it the slow way and flush all. */
3020 void do_interrupt (CPUState
*env
)
3022 powerpc_excp(env
, env
->excp_model
, env
->exception_index
);
/*
 * ppc_hw_interrupt(): scan env->pending_interrupts in fixed priority
 * order (reset, machine check, debug, hypervisor decrementer, critical
 * external, watchdog, critical doorbell, FIT, PIT, decrementer,
 * external, doorbell, performance monitor, thermal), clear the accepted
 * interrupt's pending bit where appropriate and dispatch it through
 * powerpc_excp().
 *
 * NOTE(review): this region was recovered from a lossy extraction.
 * Enabling-condition guards (presumably msr_ce / msr_ee checks),
 * `return` statements after each dispatch, and closing braces from the
 * original are missing here — TODO confirm against the original file.
 * Code lines below are kept byte-identical; only comments were added.
 */
3025 void ppc_hw_interrupt (CPUPPCState
*env
)
/* Trace the pending/request masks and MSR ME/EE bits for debugging. */
3030 qemu_log_mask(CPU_LOG_INT
, "%s: %p pending %08x req %08x me %d ee %d\n",
3031 __func__
, env
, env
->pending_interrupts
,
3032 env
->interrupt_request
, (int)msr_me
, (int)msr_ee
);
3034 /* External reset */
3035 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_RESET
)) {
3036 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_RESET
);
3037 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_RESET
);
3040 /* Machine check exception */
3041 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_MCK
)) {
3042 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_MCK
);
3043 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_MCHECK
);
3047 /* External debug exception */
3048 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DEBUG
)) {
3049 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DEBUG
);
3050 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_DEBUG
);
3055 /* XXX: find a suitable condition to enable the hypervisor mode */
/* hdice = hypervisor decrementer interrupt enable, bit 0 of LPCR. */
3056 hdice
= env
->spr
[SPR_LPCR
] & 1;
/* HDECR is deliverable unless running in hypervisor state with EE=0. */
3060 if ((msr_ee
!= 0 || msr_hv
== 0 || msr_pr
!= 0) && hdice
!= 0) {
3061 /* Hypervisor decrementer exception */
3062 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_HDECR
)) {
3063 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_HDECR
);
3064 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_HDECR
);
3069 /* External critical interrupt */
3070 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CEXT
)) {
3071 /* Taking a critical external interrupt does not clear the external
3072 * critical interrupt status
3075 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_CEXT
);
3077 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_CRITICAL
);
3082 /* Watchdog timer on embedded PowerPC */
3083 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_WDT
)) {
3084 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_WDT
);
3085 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_WDT
);
/* Embedded critical doorbell interrupt. */
3088 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_CDOORBELL
)) {
3089 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_CDOORBELL
);
3090 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_DOORCI
);
3093 /* Fixed interval timer on embedded PowerPC */
3094 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_FIT
)) {
3095 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_FIT
);
3096 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_FIT
);
3099 /* Programmable interval timer on embedded PowerPC */
3100 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PIT
)) {
3101 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PIT
);
3102 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_PIT
);
3105 /* Decrementer exception */
3106 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DECR
)) {
3107 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DECR
);
3108 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_DECR
);
3111 /* External interrupt */
3112 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_EXT
)) {
3113 /* Taking an external interrupt does not clear the external
3117 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_EXT
);
3119 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_EXTERNAL
);
/* Embedded doorbell interrupt. */
3122 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_DOORBELL
)) {
3123 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_DOORBELL
);
3124 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_DOORI
);
/* Performance monitor interrupt. */
3127 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_PERFM
)) {
3128 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_PERFM
);
3129 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_PERFM
);
3132 /* Thermal interrupt */
3133 if (env
->pending_interrupts
& (1 << PPC_INTERRUPT_THERM
)) {
3134 env
->pending_interrupts
&= ~(1 << PPC_INTERRUPT_THERM
);
3135 powerpc_excp(env
, env
->excp_model
, POWERPC_EXCP_THERM
);
3140 #endif /* !CONFIG_USER_ONLY */
3142 void cpu_dump_rfi (target_ulong RA
, target_ulong msr
)
3144 qemu_log("Return from exception at " TARGET_FMT_lx
" with flags "
3145 TARGET_FMT_lx
"\n", RA
, msr
);
/*
 * cpu_reset(): put the CPU back into its power-on state.  Builds a
 * reset-time MSR value bit by bit, sets the program counter from the
 * hard-reset vector, invalidates the TLBs when a real MMU is modelled,
 * and clears all pending exception/interrupt state.
 *
 * NOTE(review): recovered from a lossy extraction — the local `msr`
 * declaration, several matching #endif lines, closing braces and the
 * final tlb_flush call are missing here; TODO confirm against the
 * original file.  Code lines are byte-identical; only comments added.
 */
3148 void cpu_reset(CPUPPCState
*env
)
/* Optional reset trace: dump which CPU resets and its full state. */
3152 if (qemu_loglevel_mask(CPU_LOG_RESET
)) {
3153 qemu_log("CPU Reset (CPU %d)\n", env
->cpu_index
);
3154 log_cpu_state(env
, 0);
/* Start from an all-zero MSR and OR in the reset-time bits. */
3157 msr
= (target_ulong
)0;
3159 /* XXX: find a suitable condition to enable the hypervisor mode */
3160 msr
|= (target_ulong
)MSR_HVB
;
3162 msr
|= (target_ulong
)0 << MSR_AP
; /* TO BE CHECKED */
3163 msr
|= (target_ulong
)0 << MSR_SA
; /* TO BE CHECKED */
3164 msr
|= (target_ulong
)1 << MSR_EP
;
3165 #if defined (DO_SINGLE_STEP) && 0
3166 /* Single step trace mode */
3167 msr
|= (target_ulong
)1 << MSR_SE
;
3168 msr
|= (target_ulong
)1 << MSR_BE
;
/* User-mode emulation: enable FP/AltiVec/SPE and drop to problem state. */
3170 #if defined(CONFIG_USER_ONLY)
3171 msr
|= (target_ulong
)1 << MSR_FP
; /* Allow floating point usage */
3172 msr
|= (target_ulong
)1 << MSR_VR
; /* Allow altivec usage */
3173 msr
|= (target_ulong
)1 << MSR_SPE
; /* Allow SPE usage */
3174 msr
|= (target_ulong
)1 << MSR_PR
;
/* Resume at the hard-reset vector within the reset exception prefix. */
3176 env
->excp_prefix
= env
->hreset_excp_prefix
;
3177 env
->nip
= env
->hreset_vector
| env
->excp_prefix
;
3178 if (env
->mmu_model
!= POWERPC_MMU_REAL
)
3179 ppc_tlb_invalidate_all(env
);
/* Commit the MSR, masking off bits this CPU model does not implement. */
3181 env
->msr
= msr
& env
->msr_mask
;
3182 #if defined(TARGET_PPC64)
3183 if (env
->mmu_model
& POWERPC_MMU_64
)
3184 env
->msr
|= (1ULL << MSR_SF
);
3186 hreg_compute_hflags(env
);
/* Invalidate any outstanding lwarx/ldarx reservation. */
3187 env
->reserve_addr
= (target_ulong
)-1ULL;
3188 /* Be sure no exception or interrupt is pending */
3189 env
->pending_interrupts
= 0;
3190 env
->exception_index
= POWERPC_EXCP_NONE
;
3191 env
->error_code
= 0;
3192 /* Flush all TLBs */
/*
 * cpu_ppc_init(): allocate and initialize a PowerPC CPU from a model
 * name string.  Looks up the model definition, zero-allocates the CPU
 * state, initializes the TCG translator when enabled, adjusts the CPU
 * index for SMT under KVM, registers the model-specific state, and
 * starts the vcpu.
 *
 * NOTE(review): recovered from a lossy extraction — the local `env`
 * declaration, the NULL-check on the model lookup, closing braces and
 * the final `return env;` are missing here; TODO confirm against the
 * original file.  Code lines are byte-identical; only comments added.
 */
3196 CPUPPCState
*cpu_ppc_init (const char *cpu_model
)
3199 const ppc_def_t
*def
;
/* Resolve the model definition from its name. */
3201 def
= cpu_ppc_find_by_name(cpu_model
);
/* Zero-allocate the CPU state structure. */
3205 env
= g_malloc0(sizeof(CPUPPCState
));
3207 if (tcg_enabled()) {
3208 ppc_translate_init();
3210 /* Adjust cpu index for SMT */
3211 #if !defined(CONFIG_USER_ONLY)
3212 if (kvm_enabled()) {
3213 int smt
= kvmppc_smt_threads();
/* Remap (core, thread) numbering from smp_threads to the host's SMT width. */
3215 env
->cpu_index
= (env
->cpu_index
/ smp_threads
)*smt
3216 + (env
->cpu_index
% smp_threads
);
3218 #endif /* !CONFIG_USER_ONLY */
3219 env
->cpu_model_str
= cpu_model
;
3220 cpu_ppc_register_internal(env
, def
);
3222 qemu_init_vcpu(env
);
3227 void cpu_ppc_close (CPUPPCState
*env
)
3229 /* Should also remove all opcode tables... */