2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "sysemu/kvm.h"
27 //#define DEBUG_SOFTWARE_TLB
28 //#define DUMP_PAGE_TABLES
29 //#define DEBUG_SOFTWARE_TLB
30 //#define FLUSH_ALL_TLBS
33 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
34 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
36 # define LOG_MMU(...) do { } while (0)
37 # define LOG_MMU_STATE(...) do { } while (0)
40 #ifdef DEBUG_SOFTWARE_TLB
41 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
43 # define LOG_SWTLB(...) do { } while (0)
47 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
49 # define LOG_BATS(...) do { } while (0)
53 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
55 # define LOG_SLB(...) do { } while (0)
58 /*****************************************************************************/
59 /* PowerPC MMU emulation */
60 #if defined(CONFIG_USER_ONLY)
61 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
64 int exception
, error_code
;
67 exception
= POWERPC_EXCP_ISI
;
68 error_code
= 0x40000000;
70 exception
= POWERPC_EXCP_DSI
;
71 error_code
= 0x40000000;
73 error_code
|= 0x02000000;
75 env
->spr
[SPR_DAR
] = address
;
76 env
->spr
[SPR_DSISR
] = error_code
;
78 env
->exception_index
= exception
;
79 env
->error_code
= error_code
;
85 /* Common routines used by software and hardware TLBs emulation */
86 static inline int pte_is_valid(target_ulong pte0
)
88 return pte0
& 0x80000000 ? 1 : 0;
91 static inline void pte_invalidate(target_ulong
*pte0
)
96 #if defined(TARGET_PPC64)
97 static inline int pte64_is_valid(target_ulong pte0
)
99 return pte0
& 0x0000000000000001ULL
? 1 : 0;
103 #define PTE_PTEM_MASK 0x7FFFFFBF
104 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
105 #if defined(TARGET_PPC64)
106 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
107 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
110 static inline int pp_check(int key
, int pp
, int nx
)
114 /* Compute access rights */
115 /* When pp is 3/7, the result is undefined. Set it to noaccess */
122 access
|= PAGE_WRITE
;
140 access
= PAGE_READ
| PAGE_WRITE
;
151 static inline int check_prot(int prot
, int rw
, int access_type
)
155 if (access_type
== ACCESS_CODE
) {
156 if (prot
& PAGE_EXEC
) {
162 if (prot
& PAGE_WRITE
) {
168 if (prot
& PAGE_READ
) {
178 static inline int pte_check(mmu_ctx_t
*ctx
, int is_64b
, target_ulong pte0
,
179 target_ulong pte1
, int h
, int rw
, int type
)
181 target_ulong ptem
, mmask
;
182 int access
, ret
, pteh
, ptev
, pp
;
185 /* Check validity and table match */
186 #if defined(TARGET_PPC64)
188 ptev
= pte64_is_valid(pte0
);
189 pteh
= (pte0
>> 1) & 1;
193 ptev
= pte_is_valid(pte0
);
194 pteh
= (pte0
>> 6) & 1;
196 if (ptev
&& h
== pteh
) {
197 /* Check vsid & api */
198 #if defined(TARGET_PPC64)
200 ptem
= pte0
& PTE64_PTEM_MASK
;
201 mmask
= PTE64_CHECK_MASK
;
202 pp
= (pte1
& 0x00000003) | ((pte1
>> 61) & 0x00000004);
203 ctx
->nx
= (pte1
>> 2) & 1; /* No execute bit */
204 ctx
->nx
|= (pte1
>> 3) & 1; /* Guarded bit */
208 ptem
= pte0
& PTE_PTEM_MASK
;
209 mmask
= PTE_CHECK_MASK
;
210 pp
= pte1
& 0x00000003;
212 if (ptem
== ctx
->ptem
) {
213 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
214 /* all matches should have equal RPN, WIMG & PP */
215 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
216 qemu_log("Bad RPN/WIMG/PP\n");
220 /* Compute access rights */
221 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
222 /* Keep the matching PTE informations */
225 ret
= check_prot(ctx
->prot
, rw
, type
);
228 LOG_MMU("PTE access granted !\n");
230 /* Access right violation */
231 LOG_MMU("PTE access rejected\n");
239 static inline int pte32_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
240 target_ulong pte1
, int h
, int rw
, int type
)
242 return pte_check(ctx
, 0, pte0
, pte1
, h
, rw
, type
);
245 #if defined(TARGET_PPC64)
246 static inline int pte64_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
247 target_ulong pte1
, int h
, int rw
, int type
)
249 return pte_check(ctx
, 1, pte0
, pte1
, h
, rw
, type
);
253 static inline int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
258 /* Update page flags */
259 if (!(*pte1p
& 0x00000100)) {
260 /* Update accessed flag */
261 *pte1p
|= 0x00000100;
264 if (!(*pte1p
& 0x00000080)) {
265 if (rw
== 1 && ret
== 0) {
266 /* Update changed flag */
267 *pte1p
|= 0x00000080;
270 /* Force page fault for first write access */
271 ctx
->prot
&= ~PAGE_WRITE
;
278 /* Software driven TLB helpers */
279 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
280 int way
, int is_code
)
284 /* Select TLB num in a way from address */
285 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
287 nr
+= env
->tlb_per_way
* way
;
288 /* 6xx have separate TLBs for instructions and data */
289 if (is_code
&& env
->id_tlbs
== 1) {
296 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
301 /* LOG_SWTLB("Invalidate all TLBs\n"); */
302 /* Invalidate all defined software TLB */
304 if (env
->id_tlbs
== 1) {
307 for (nr
= 0; nr
< max
; nr
++) {
308 tlb
= &env
->tlb
.tlb6
[nr
];
309 pte_invalidate(&tlb
->pte0
);
314 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
316 int is_code
, int match_epn
)
318 #if !defined(FLUSH_ALL_TLBS)
322 /* Invalidate ITLB + DTLB, all ways */
323 for (way
= 0; way
< env
->nb_ways
; way
++) {
324 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
325 tlb
= &env
->tlb
.tlb6
[nr
];
326 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
327 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
329 pte_invalidate(&tlb
->pte0
);
330 tlb_flush_page(env
, tlb
->EPN
);
334 /* XXX: PowerPC specification say this is valid as well */
335 ppc6xx_tlb_invalidate_all(env
);
339 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
340 target_ulong eaddr
, int is_code
)
342 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
345 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
346 int is_code
, target_ulong pte0
, target_ulong pte1
)
351 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
352 tlb
= &env
->tlb
.tlb6
[nr
];
353 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
354 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
355 /* Invalidate any pending reference in QEMU for this virtual address */
356 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
360 /* Store last way for LRU mechanism */
364 static inline int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
365 target_ulong eaddr
, int rw
, int access_type
)
372 ret
= -1; /* No TLB found */
373 for (way
= 0; way
< env
->nb_ways
; way
++) {
374 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
375 access_type
== ACCESS_CODE
? 1 : 0);
376 tlb
= &env
->tlb
.tlb6
[nr
];
377 /* This test "emulates" the PTE index match for hardware TLBs */
378 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
379 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
380 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
381 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
382 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
385 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
386 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
387 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
388 tlb
->EPN
, eaddr
, tlb
->pte1
,
389 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
390 switch (pte32_check(ctx
, tlb
->pte0
, tlb
->pte1
, 0, rw
, access_type
)) {
392 /* TLB inconsistency */
395 /* Access violation */
405 /* XXX: we should go on looping to check all TLBs consistency
406 * but we can speed-up the whole thing as the
407 * result would be undefined if TLBs are not consistent.
416 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
417 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
418 /* Update page flags */
419 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
425 /* Perform BAT hit & translation */
426 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
427 int *validp
, int *protp
, target_ulong
*BATu
,
433 bl
= (*BATu
& 0x00001FFC) << 15;
436 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
437 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
439 pp
= *BATl
& 0x00000003;
441 prot
= PAGE_READ
| PAGE_EXEC
;
452 static inline void bat_601_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
453 int *validp
, int *protp
,
454 target_ulong
*BATu
, target_ulong
*BATl
)
457 int key
, pp
, valid
, prot
;
459 bl
= (*BATl
& 0x0000003F) << 17;
460 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx
" msk " TARGET_FMT_lx
"\n",
461 (uint8_t)(*BATl
& 0x0000003F), bl
, ~bl
);
463 valid
= (*BATl
>> 6) & 1;
465 pp
= *BATu
& 0x00000003;
467 key
= (*BATu
>> 3) & 1;
469 key
= (*BATu
>> 2) & 1;
471 prot
= pp_check(key
, pp
, 0);
478 static inline int get_bat(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
479 target_ulong
virtual, int rw
, int type
)
481 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
482 target_ulong BEPIl
, BEPIu
, bl
;
486 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
487 type
== ACCESS_CODE
? 'I' : 'D', virtual);
490 BATlt
= env
->IBAT
[1];
491 BATut
= env
->IBAT
[0];
494 BATlt
= env
->DBAT
[1];
495 BATut
= env
->DBAT
[0];
498 for (i
= 0; i
< env
->nb_BATs
; i
++) {
501 BEPIu
= *BATu
& 0xF0000000;
502 BEPIl
= *BATu
& 0x0FFE0000;
503 if (unlikely(env
->mmu_model
== POWERPC_MMU_601
)) {
504 bat_601_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
506 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
508 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
509 " BATl " TARGET_FMT_lx
"\n", __func__
,
510 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
511 if ((virtual & 0xF0000000) == BEPIu
&&
512 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
515 /* Get physical address */
516 ctx
->raddr
= (*BATl
& 0xF0000000) |
517 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
518 (virtual & 0x0001F000);
519 /* Compute access rights */
521 ret
= check_prot(ctx
->prot
, rw
, type
);
523 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
524 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
525 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
532 #if defined(DEBUG_BATS)
533 if (qemu_log_enabled()) {
534 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
535 for (i
= 0; i
< 4; i
++) {
538 BEPIu
= *BATu
& 0xF0000000;
539 BEPIl
= *BATu
& 0x0FFE0000;
540 bl
= (*BATu
& 0x00001FFC) << 15;
541 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
542 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
543 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
544 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
545 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
554 static inline hwaddr
get_pteg_offset(CPUPPCState
*env
,
558 return (hash
* pte_size
* 8) & env
->htab_mask
;
561 /* PTE table lookup */
562 static inline int find_pte2(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int is_64b
, int h
,
563 int rw
, int type
, int target_page_bits
)
566 target_ulong pte0
, pte1
;
570 ret
= -1; /* No entry found */
571 pteg_off
= get_pteg_offset(env
, ctx
->hash
[h
],
572 is_64b
? HASH_PTE_SIZE_64
: HASH_PTE_SIZE_32
);
573 for (i
= 0; i
< 8; i
++) {
574 #if defined(TARGET_PPC64)
576 if (env
->external_htab
) {
577 pte0
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16));
578 pte1
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16) + 8);
580 pte0
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16));
581 pte1
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16) + 8);
584 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
585 LOG_MMU("Load pte from %016" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
586 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
587 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
588 (int)((pte0
>> 1) & 1), ctx
->ptem
);
592 if (env
->external_htab
) {
593 pte0
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8));
594 pte1
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8) + 4);
596 pte0
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8));
597 pte1
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8) + 4);
599 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
600 LOG_MMU("Load pte from %08" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
601 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
602 pteg_off
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
603 (int)((pte0
>> 6) & 1), ctx
->ptem
);
607 /* PTE inconsistency */
610 /* Access violation */
620 /* XXX: we should go on looping to check all PTEs consistency
621 * but if we can speed-up the whole thing as the
622 * result would be undefined if PTEs are not consistent.
631 LOG_MMU("found PTE at addr %08" HWADDR_PRIx
" prot=%01x ret=%d\n",
632 ctx
->raddr
, ctx
->prot
, ret
);
633 /* Update page flags */
635 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
636 #if defined(TARGET_PPC64)
638 if (env
->external_htab
) {
639 stq_p(env
->external_htab
+ pteg_off
+ (good
* 16) + 8,
642 stq_phys_notdirty(env
->htab_base
+ pteg_off
+
643 (good
* 16) + 8, pte1
);
648 if (env
->external_htab
) {
649 stl_p(env
->external_htab
+ pteg_off
+ (good
* 8) + 4,
652 stl_phys_notdirty(env
->htab_base
+ pteg_off
+
653 (good
* 8) + 4, pte1
);
659 /* We have a TLB that saves 4K pages, so let's
660 * split a huge page to 4k chunks */
661 if (target_page_bits
!= TARGET_PAGE_BITS
) {
662 ctx
->raddr
|= (ctx
->eaddr
& ((1 << target_page_bits
) - 1))
668 static inline int find_pte(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
669 int type
, int target_page_bits
)
671 #if defined(TARGET_PPC64)
672 if (env
->mmu_model
& POWERPC_MMU_64
) {
673 return find_pte2(env
, ctx
, 1, h
, rw
, type
, target_page_bits
);
677 return find_pte2(env
, ctx
, 0, h
, rw
, type
, target_page_bits
);
680 #if defined(TARGET_PPC64)
681 static inline ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
683 uint64_t esid_256M
, esid_1T
;
686 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
688 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
689 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
691 for (n
= 0; n
< env
->slb_nr
; n
++) {
692 ppc_slb_t
*slb
= &env
->slb
[n
];
694 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
695 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
696 /* We check for 1T matches on all MMUs here - if the MMU
697 * doesn't have 1T segment support, we will have prevented 1T
698 * entries from being inserted in the slbmte code. */
699 if (((slb
->esid
== esid_256M
) &&
700 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
701 || ((slb
->esid
== esid_1T
) &&
702 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
710 /*****************************************************************************/
713 void helper_slbia(CPUPPCState
*env
)
715 int n
, do_invalidate
;
718 /* XXX: Warning: slbia never invalidates the first segment */
719 for (n
= 1; n
< env
->slb_nr
; n
++) {
720 ppc_slb_t
*slb
= &env
->slb
[n
];
722 if (slb
->esid
& SLB_ESID_V
) {
723 slb
->esid
&= ~SLB_ESID_V
;
724 /* XXX: given the fact that segment size is 256 MB or 1TB,
725 * and we still don't have a tlb_flush_mask(env, n, mask)
726 * in QEMU, we just invalidate all TLBs
736 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
740 slb
= slb_lookup(env
, addr
);
745 if (slb
->esid
& SLB_ESID_V
) {
746 slb
->esid
&= ~SLB_ESID_V
;
748 /* XXX: given the fact that segment size is 256 MB or 1TB,
749 * and we still don't have a tlb_flush_mask(env, n, mask)
750 * in QEMU, we just invalidate all TLBs
756 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
758 int slot
= rb
& 0xfff;
759 ppc_slb_t
*slb
= &env
->slb
[slot
];
761 if (rb
& (0x1000 - env
->slb_nr
)) {
762 return -1; /* Reserved bits set or slot too high */
764 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
765 return -1; /* Bad segment size */
767 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
768 return -1; /* 1T segment on MMU that doesn't support it */
771 /* Mask out the slot number as we store the entry */
772 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
775 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
776 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
777 slb
->esid
, slb
->vsid
);
782 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
785 int slot
= rb
& 0xfff;
786 ppc_slb_t
*slb
= &env
->slb
[slot
];
788 if (slot
>= env
->slb_nr
) {
796 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
799 int slot
= rb
& 0xfff;
800 ppc_slb_t
*slb
= &env
->slb
[slot
];
802 if (slot
>= env
->slb_nr
) {
809 #endif /* defined(TARGET_PPC64) */
811 /* Perform segment based translation */
812 static inline int get_segment(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
813 target_ulong eaddr
, int rw
, int type
)
817 int ds
, pr
, target_page_bits
;
822 #if defined(TARGET_PPC64)
823 if (env
->mmu_model
& POWERPC_MMU_64
) {
825 target_ulong pageaddr
;
828 LOG_MMU("Check SLBs\n");
829 slb
= slb_lookup(env
, eaddr
);
834 if (slb
->vsid
& SLB_VSID_B
) {
835 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
838 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
842 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
843 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
844 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
845 : (slb
->vsid
& SLB_VSID_KS
));
847 ctx
->nx
= !!(slb
->vsid
& SLB_VSID_N
);
849 pageaddr
= eaddr
& ((1ULL << segment_bits
)
850 - (1ULL << target_page_bits
));
851 if (slb
->vsid
& SLB_VSID_B
) {
852 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
854 hash
= vsid
^ (pageaddr
>> target_page_bits
);
856 /* Only 5 bits of the page index are used in the AVPN */
857 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
858 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
860 #endif /* defined(TARGET_PPC64) */
862 target_ulong sr
, pgidx
;
864 sr
= env
->sr
[eaddr
>> 28];
865 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
866 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
867 ds
= sr
& 0x80000000 ? 1 : 0;
868 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
869 vsid
= sr
& 0x00FFFFFF;
870 target_page_bits
= TARGET_PAGE_BITS
;
871 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
872 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
873 " ir=%d dr=%d pr=%d %d t=%d\n",
874 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
875 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
876 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
878 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
880 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
881 ctx
->key
, ds
, ctx
->nx
, vsid
);
884 /* Check if instruction fetch is allowed, if needed */
885 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
886 /* Page address translation */
887 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
888 " hash " TARGET_FMT_plx
"\n",
889 env
->htab_base
, env
->htab_mask
, hash
);
891 ctx
->hash
[1] = ~hash
;
893 /* Initialize real address with an invalid value */
894 ctx
->raddr
= (hwaddr
)-1ULL;
895 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
896 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
897 /* Software TLB search */
898 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
900 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
901 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
902 " hash=" TARGET_FMT_plx
"\n",
903 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
905 /* Primary table lookup */
906 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
908 /* Secondary table lookup */
909 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
910 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
911 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
912 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
913 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
920 #if defined(DUMP_PAGE_TABLES)
921 if (qemu_log_enabled()) {
923 uint32_t a0
, a1
, a2
, a3
;
925 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
926 "\n", sdr
, mask
+ 0x80);
927 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
929 a0
= ldl_phys(curaddr
);
930 a1
= ldl_phys(curaddr
+ 4);
931 a2
= ldl_phys(curaddr
+ 8);
932 a3
= ldl_phys(curaddr
+ 12);
933 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
934 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
935 curaddr
, a0
, a1
, a2
, a3
);
941 LOG_MMU("No access allowed\n");
947 LOG_MMU("direct store...\n");
948 /* Direct-store segment : absolutely *BUGGY* for now */
950 /* Direct-store implies a 32-bit MMU.
951 * Check the Segment Register's bus unit ID (BUID).
953 sr
= env
->sr
[eaddr
>> 28];
954 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
955 /* Memory-forced I/O controller interface access */
956 /* If T=1 and BUID=x'07F', the 601 performs a memory access
957 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
959 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
960 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
966 /* Integer load/store : only access allowed */
969 /* No code fetch is allowed in direct-store areas */
972 /* Floating point load/store */
975 /* lwarx, ldarx or srwcx. */
978 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
979 /* Should make the instruction do no-op.
980 * As it already do no-op, it's quite easy :-)
988 qemu_log("ERROR: instruction should not need "
989 "address translation\n");
992 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1003 /* Generic TLB check function for embedded PowerPC implementations */
1004 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1006 target_ulong address
, uint32_t pid
, int ext
,
1011 /* Check valid flag */
1012 if (!(tlb
->prot
& PAGE_VALID
)) {
1015 mask
= ~(tlb
->size
- 1);
1016 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1017 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1018 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
1020 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
1023 /* Check effective address */
1024 if ((address
& mask
) != tlb
->EPN
) {
1027 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1029 /* Extend the physical address to 36 bits */
1030 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
1036 /* Generic TLB search function for PowerPC embedded implementations */
1037 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
1044 /* Default return value is no match */
1046 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1047 tlb
= &env
->tlb
.tlbe
[i
];
1048 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1057 /* Helpers specific to PowerPC 40x implementations */
1058 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
1063 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1064 tlb
= &env
->tlb
.tlbe
[i
];
1065 tlb
->prot
&= ~PAGE_VALID
;
1070 static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState
*env
,
1071 target_ulong eaddr
, uint32_t pid
)
1073 #if !defined(FLUSH_ALL_TLBS)
1076 target_ulong page
, end
;
1079 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1080 tlb
= &env
->tlb
.tlbe
[i
];
1081 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1082 end
= tlb
->EPN
+ tlb
->size
;
1083 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
1084 tlb_flush_page(env
, page
);
1086 tlb
->prot
&= ~PAGE_VALID
;
1091 ppc4xx_tlb_invalidate_all(env
);
1095 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1096 target_ulong address
, int rw
,
1101 int i
, ret
, zsel
, zpr
, pr
;
1104 raddr
= (hwaddr
)-1ULL;
1106 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1107 tlb
= &env
->tlb
.tlbe
[i
];
1108 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1109 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
1112 zsel
= (tlb
->attr
>> 4) & 0xF;
1113 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
1114 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1115 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1116 /* Check execute enable bit */
1124 /* All accesses granted */
1125 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1130 /* Raise Zone protection fault. */
1131 env
->spr
[SPR_40x_ESR
] = 1 << 22;
1139 /* Check from TLB entry */
1140 ctx
->prot
= tlb
->prot
;
1141 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1143 env
->spr
[SPR_40x_ESR
] = 0;
1149 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1150 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1155 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1156 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1161 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
1163 /* XXX: TO BE FIXED */
1164 if (val
!= 0x00000000) {
1165 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1167 env
->spr
[SPR_405_SLER
] = val
;
1170 static inline int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1171 hwaddr
*raddr
, int *prot
,
1172 target_ulong address
, int rw
,
1173 int access_type
, int i
)
1177 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1178 env
->spr
[SPR_BOOKE_PID
],
1179 !env
->nb_pids
, i
) >= 0) {
1183 if (env
->spr
[SPR_BOOKE_PID1
] &&
1184 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1185 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
1189 if (env
->spr
[SPR_BOOKE_PID2
] &&
1190 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1191 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
1195 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1201 prot2
= tlb
->prot
& 0xF;
1203 prot2
= (tlb
->prot
>> 4) & 0xF;
1206 /* Check the address space */
1207 if (access_type
== ACCESS_CODE
) {
1208 if (msr_ir
!= (tlb
->attr
& 1)) {
1209 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1214 if (prot2
& PAGE_EXEC
) {
1215 LOG_SWTLB("%s: good TLB!\n", __func__
);
1219 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1222 if (msr_dr
!= (tlb
->attr
& 1)) {
1223 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1228 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1229 LOG_SWTLB("%s: found TLB!\n", __func__
);
1233 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1240 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1241 target_ulong address
, int rw
,
1249 raddr
= (hwaddr
)-1ULL;
1250 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1251 tlb
= &env
->tlb
.tlbe
[i
];
1252 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
1261 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1262 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1265 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1266 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1272 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
1273 const int check_iprot
)
1277 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
1279 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1280 if (flags
& (1 << i
)) {
1281 tlb_size
= booke206_tlb_size(env
, i
);
1282 for (j
= 0; j
< tlb_size
; j
++) {
1283 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
1284 tlb
[j
].mas1
&= ~MAS1_VALID
;
1288 tlb
+= booke206_tlb_size(env
, i
);
1294 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
1299 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1301 return 1024ULL << tlbm_size
;
1304 /* TLB check function for MAS based SoftTLBs */
1305 static int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1307 target_ulong address
, uint32_t pid
)
1312 /* Check valid flag */
1313 if (!(tlb
->mas1
& MAS1_VALID
)) {
1317 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
1318 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
1319 PRIx64
" mask=0x" TARGET_FMT_lx
" MAS7_3=0x%" PRIx64
" MAS8=%x\n",
1320 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
, tlb
->mas7_3
,
1324 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
1325 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
1329 /* Check effective address */
1330 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
1335 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
1341 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1342 hwaddr
*raddr
, int *prot
,
1343 target_ulong address
, int rw
,
1349 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1350 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1354 if (env
->spr
[SPR_BOOKE_PID1
] &&
1355 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1356 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1360 if (env
->spr
[SPR_BOOKE_PID2
] &&
1361 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1362 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1366 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1372 if (tlb
->mas7_3
& MAS3_UR
) {
1375 if (tlb
->mas7_3
& MAS3_UW
) {
1376 prot2
|= PAGE_WRITE
;
1378 if (tlb
->mas7_3
& MAS3_UX
) {
1382 if (tlb
->mas7_3
& MAS3_SR
) {
1385 if (tlb
->mas7_3
& MAS3_SW
) {
1386 prot2
|= PAGE_WRITE
;
1388 if (tlb
->mas7_3
& MAS3_SX
) {
1393 /* Check the address space and permissions */
1394 if (access_type
== ACCESS_CODE
) {
1395 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1396 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1401 if (prot2
& PAGE_EXEC
) {
1402 LOG_SWTLB("%s: good TLB!\n", __func__
);
1406 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1409 if (msr_dr
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1410 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1415 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1416 LOG_SWTLB("%s: found TLB!\n", __func__
);
1420 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1427 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1428 target_ulong address
, int rw
,
1436 raddr
= (hwaddr
)-1ULL;
1438 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1439 int ways
= booke206_tlb_ways(env
, i
);
1441 for (j
= 0; j
< ways
; j
++) {
1442 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1446 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1458 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1459 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1462 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1463 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1469 static const char *book3e_tsize_to_str
[32] = {
1470 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1471 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1472 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1476 static void mmubooke_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1479 ppcemb_tlb_t
*entry
;
1482 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1483 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1487 cpu_fprintf(f
, "\nTLB:\n");
1488 cpu_fprintf(f
, "Effective Physical Size PID Prot "
1491 entry
= &env
->tlb
.tlbe
[0];
1492 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1495 uint64_t size
= (uint64_t)entry
->size
;
1498 /* Check valid flag */
1499 if (!(entry
->prot
& PAGE_VALID
)) {
1503 mask
= ~(entry
->size
- 1);
1504 ea
= entry
->EPN
& mask
;
1505 pa
= entry
->RPN
& mask
;
1506 /* Extend the physical address to 36 bits */
1507 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1510 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ 1024);
1512 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
);
1514 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1515 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1516 entry
->prot
, entry
->attr
);
1521 static void mmubooke206_dump_one_tlb(FILE *f
, fprintf_function cpu_fprintf
,
1522 CPUPPCState
*env
, int tlbn
, int offset
,
1525 ppcmas_tlb_t
*entry
;
1528 cpu_fprintf(f
, "\nTLB%d:\n", tlbn
);
1529 cpu_fprintf(f
, "Effective Physical Size TID TS SRWX"
1530 " URWX WIMGE U0123\n");
1532 entry
= &env
->tlb
.tlbm
[offset
];
1533 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1534 hwaddr ea
, pa
, size
;
1537 if (!(entry
->mas1
& MAS1_VALID
)) {
1541 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1542 size
= 1024ULL << tsize
;
1543 ea
= entry
->mas2
& ~(size
- 1);
1544 pa
= entry
->mas7_3
& ~(size
- 1);
1546 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1547 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1548 (uint64_t)ea
, (uint64_t)pa
,
1549 book3e_tsize_to_str
[tsize
],
1550 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1551 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1552 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1553 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1554 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1555 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1556 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1557 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1558 entry
->mas2
& MAS2_W
? 'W' : '-',
1559 entry
->mas2
& MAS2_I
? 'I' : '-',
1560 entry
->mas2
& MAS2_M
? 'M' : '-',
1561 entry
->mas2
& MAS2_G
? 'G' : '-',
1562 entry
->mas2
& MAS2_E
? 'E' : '-',
1563 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1564 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1565 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1566 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1570 static void mmubooke206_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1576 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1577 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1581 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1582 int size
= booke206_tlb_size(env
, i
);
1588 mmubooke206_dump_one_tlb(f
, cpu_fprintf
, env
, i
, offset
, size
);
#if defined(TARGET_PPC64)
/* Dump the 64-bit hash-MMU SLB: one line per non-empty slot, showing the
 * raw ESID and VSID doublewords.  State is synchronized from KVM first so
 * the in-memory copy is current.
 */
static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                              CPUPPCState *env)
{
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(env);

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}
#endif
1615 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
1617 switch (env
->mmu_model
) {
1618 case POWERPC_MMU_BOOKE
:
1619 mmubooke_dump_mmu(f
, cpu_fprintf
, env
);
1621 case POWERPC_MMU_BOOKE206
:
1622 mmubooke206_dump_mmu(f
, cpu_fprintf
, env
);
1624 #if defined(TARGET_PPC64)
1625 case POWERPC_MMU_64B
:
1626 case POWERPC_MMU_2_06
:
1627 case POWERPC_MMU_2_06d
:
1628 mmubooks_dump_mmu(f
, cpu_fprintf
, env
);
1632 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1636 static inline int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1637 target_ulong eaddr
, int rw
)
1642 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1644 switch (env
->mmu_model
) {
1645 case POWERPC_MMU_32B
:
1646 case POWERPC_MMU_601
:
1647 case POWERPC_MMU_SOFT_6xx
:
1648 case POWERPC_MMU_SOFT_74xx
:
1649 case POWERPC_MMU_SOFT_4xx
:
1650 case POWERPC_MMU_REAL
:
1651 case POWERPC_MMU_BOOKE
:
1652 ctx
->prot
|= PAGE_WRITE
;
1654 #if defined(TARGET_PPC64)
1655 case POWERPC_MMU_64B
:
1656 case POWERPC_MMU_2_06
:
1657 case POWERPC_MMU_2_06d
:
1658 /* Real address are 60 bits long */
1659 ctx
->raddr
&= 0x0FFFFFFFFFFFFFFFULL
;
1660 ctx
->prot
|= PAGE_WRITE
;
1663 case POWERPC_MMU_SOFT_4xx_Z
:
1664 if (unlikely(msr_pe
!= 0)) {
1665 /* 403 family add some particular protections,
1666 * using PBL/PBU registers for accesses with no translation.
1669 /* Check PLB validity */
1670 (env
->pb
[0] < env
->pb
[1] &&
1671 /* and address in plb area */
1672 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1673 (env
->pb
[2] < env
->pb
[3] &&
1674 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1675 if (in_plb
^ msr_px
) {
1676 /* Access in protected area */
1678 /* Access is not allowed */
1682 /* Read-write access is allowed */
1683 ctx
->prot
|= PAGE_WRITE
;
1687 case POWERPC_MMU_MPC8xx
:
1689 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1691 case POWERPC_MMU_BOOKE206
:
1692 cpu_abort(env
, "BookE 2.06 MMU doesn't have physical real mode\n");
1695 cpu_abort(env
, "Unknown or invalid MMU model\n");
1702 static int get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1703 target_ulong eaddr
, int rw
, int access_type
)
1708 qemu_log("%s\n", __func__
);
1710 if ((access_type
== ACCESS_CODE
&& msr_ir
== 0) ||
1711 (access_type
!= ACCESS_CODE
&& msr_dr
== 0)) {
1712 if (env
->mmu_model
== POWERPC_MMU_BOOKE
) {
1713 /* The BookE MMU always performs address translation. The
1714 IS and DS bits only affect the address space. */
1715 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1717 } else if (env
->mmu_model
== POWERPC_MMU_BOOKE206
) {
1718 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1721 /* No address translation. */
1722 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1726 switch (env
->mmu_model
) {
1727 case POWERPC_MMU_32B
:
1728 case POWERPC_MMU_601
:
1729 case POWERPC_MMU_SOFT_6xx
:
1730 case POWERPC_MMU_SOFT_74xx
:
1731 /* Try to find a BAT */
1732 if (env
->nb_BATs
!= 0) {
1733 ret
= get_bat(env
, ctx
, eaddr
, rw
, access_type
);
1735 #if defined(TARGET_PPC64)
1736 case POWERPC_MMU_64B
:
1737 case POWERPC_MMU_2_06
:
1738 case POWERPC_MMU_2_06d
:
1741 /* We didn't match any BAT entry or don't have BATs */
1742 ret
= get_segment(env
, ctx
, eaddr
, rw
, access_type
);
1745 case POWERPC_MMU_SOFT_4xx
:
1746 case POWERPC_MMU_SOFT_4xx_Z
:
1747 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1750 case POWERPC_MMU_BOOKE
:
1751 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1754 case POWERPC_MMU_BOOKE206
:
1755 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1758 case POWERPC_MMU_MPC8xx
:
1760 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1762 case POWERPC_MMU_REAL
:
1763 cpu_abort(env
, "PowerPC in real mode do not do any translation\n");
1766 cpu_abort(env
, "Unknown or invalid MMU model\n");
1771 qemu_log("%s address " TARGET_FMT_lx
" => %d " TARGET_FMT_plx
"\n",
1772 __func__
, eaddr
, ret
, ctx
->raddr
);
1778 hwaddr
cpu_get_phys_page_debug(CPUPPCState
*env
, target_ulong addr
)
1782 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) != 0)) {
1786 return ctx
.raddr
& TARGET_PAGE_MASK
;
1789 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1792 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1793 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1794 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1795 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1796 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1797 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1800 if (((rw
== 2) && msr_ir
) || ((rw
!= 2) && msr_dr
)) {
1801 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1802 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1805 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1806 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1808 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1809 case MAS4_TIDSELD_PID0
:
1810 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID
] << MAS1_TID_SHIFT
;
1812 case MAS4_TIDSELD_PID1
:
1813 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID1
] << MAS1_TID_SHIFT
;
1815 case MAS4_TIDSELD_PID2
:
1816 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID2
] << MAS1_TID_SHIFT
;
1820 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1822 /* next victim logic */
1823 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1825 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1826 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1829 /* Perform address translation */
1830 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
1840 access_type
= ACCESS_CODE
;
1843 access_type
= env
->access_type
;
1845 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1847 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1848 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1849 mmu_idx
, TARGET_PAGE_SIZE
);
1851 } else if (ret
< 0) {
1853 if (access_type
== ACCESS_CODE
) {
1856 /* No matches in page tables or TLB */
1857 switch (env
->mmu_model
) {
1858 case POWERPC_MMU_SOFT_6xx
:
1859 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1860 env
->error_code
= 1 << 18;
1861 env
->spr
[SPR_IMISS
] = address
;
1862 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1864 case POWERPC_MMU_SOFT_74xx
:
1865 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1867 case POWERPC_MMU_SOFT_4xx
:
1868 case POWERPC_MMU_SOFT_4xx_Z
:
1869 env
->exception_index
= POWERPC_EXCP_ITLB
;
1870 env
->error_code
= 0;
1871 env
->spr
[SPR_40x_DEAR
] = address
;
1872 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1874 case POWERPC_MMU_32B
:
1875 case POWERPC_MMU_601
:
1876 #if defined(TARGET_PPC64)
1877 case POWERPC_MMU_64B
:
1878 case POWERPC_MMU_2_06
:
1879 case POWERPC_MMU_2_06d
:
1881 env
->exception_index
= POWERPC_EXCP_ISI
;
1882 env
->error_code
= 0x40000000;
1884 case POWERPC_MMU_BOOKE206
:
1885 booke206_update_mas_tlb_miss(env
, address
, rw
);
1887 case POWERPC_MMU_BOOKE
:
1888 env
->exception_index
= POWERPC_EXCP_ITLB
;
1889 env
->error_code
= 0;
1890 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1892 case POWERPC_MMU_MPC8xx
:
1894 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1896 case POWERPC_MMU_REAL
:
1897 cpu_abort(env
, "PowerPC in real mode should never raise "
1898 "any MMU exceptions\n");
1901 cpu_abort(env
, "Unknown or invalid MMU model\n");
1906 /* Access rights violation */
1907 env
->exception_index
= POWERPC_EXCP_ISI
;
1908 env
->error_code
= 0x08000000;
1911 /* No execute protection violation */
1912 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1913 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1914 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1916 env
->exception_index
= POWERPC_EXCP_ISI
;
1917 env
->error_code
= 0x10000000;
1920 /* Direct store exception */
1921 /* No code fetch is allowed in direct-store areas */
1922 env
->exception_index
= POWERPC_EXCP_ISI
;
1923 env
->error_code
= 0x10000000;
1925 #if defined(TARGET_PPC64)
1927 /* No match in segment table */
1928 env
->exception_index
= POWERPC_EXCP_ISEG
;
1929 env
->error_code
= 0;
1936 /* No matches in page tables or TLB */
1937 switch (env
->mmu_model
) {
1938 case POWERPC_MMU_SOFT_6xx
:
1940 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1941 env
->error_code
= 1 << 16;
1943 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1944 env
->error_code
= 0;
1946 env
->spr
[SPR_DMISS
] = address
;
1947 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1949 env
->error_code
|= ctx
.key
<< 19;
1950 env
->spr
[SPR_HASH1
] = env
->htab_base
+
1951 get_pteg_offset(env
, ctx
.hash
[0], HASH_PTE_SIZE_32
);
1952 env
->spr
[SPR_HASH2
] = env
->htab_base
+
1953 get_pteg_offset(env
, ctx
.hash
[1], HASH_PTE_SIZE_32
);
1955 case POWERPC_MMU_SOFT_74xx
:
1957 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1959 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1962 /* Implement LRU algorithm */
1963 env
->error_code
= ctx
.key
<< 19;
1964 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1965 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1966 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1968 case POWERPC_MMU_SOFT_4xx
:
1969 case POWERPC_MMU_SOFT_4xx_Z
:
1970 env
->exception_index
= POWERPC_EXCP_DTLB
;
1971 env
->error_code
= 0;
1972 env
->spr
[SPR_40x_DEAR
] = address
;
1974 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1976 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1979 case POWERPC_MMU_32B
:
1980 case POWERPC_MMU_601
:
1981 #if defined(TARGET_PPC64)
1982 case POWERPC_MMU_64B
:
1983 case POWERPC_MMU_2_06
:
1984 case POWERPC_MMU_2_06d
:
1986 env
->exception_index
= POWERPC_EXCP_DSI
;
1987 env
->error_code
= 0;
1988 env
->spr
[SPR_DAR
] = address
;
1990 env
->spr
[SPR_DSISR
] = 0x42000000;
1992 env
->spr
[SPR_DSISR
] = 0x40000000;
1995 case POWERPC_MMU_MPC8xx
:
1997 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1999 case POWERPC_MMU_BOOKE206
:
2000 booke206_update_mas_tlb_miss(env
, address
, rw
);
2002 case POWERPC_MMU_BOOKE
:
2003 env
->exception_index
= POWERPC_EXCP_DTLB
;
2004 env
->error_code
= 0;
2005 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2006 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2008 case POWERPC_MMU_REAL
:
2009 cpu_abort(env
, "PowerPC in real mode should never raise "
2010 "any MMU exceptions\n");
2013 cpu_abort(env
, "Unknown or invalid MMU model\n");
2018 /* Access rights violation */
2019 env
->exception_index
= POWERPC_EXCP_DSI
;
2020 env
->error_code
= 0;
2021 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
2022 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
2023 env
->spr
[SPR_40x_DEAR
] = address
;
2025 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
2027 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
2028 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
2029 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2030 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2032 env
->spr
[SPR_DAR
] = address
;
2034 env
->spr
[SPR_DSISR
] = 0x0A000000;
2036 env
->spr
[SPR_DSISR
] = 0x08000000;
2041 /* Direct store exception */
2042 switch (access_type
) {
2044 /* Floating point load/store */
2045 env
->exception_index
= POWERPC_EXCP_ALIGN
;
2046 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
2047 env
->spr
[SPR_DAR
] = address
;
2050 /* lwarx, ldarx or stwcx. */
2051 env
->exception_index
= POWERPC_EXCP_DSI
;
2052 env
->error_code
= 0;
2053 env
->spr
[SPR_DAR
] = address
;
2055 env
->spr
[SPR_DSISR
] = 0x06000000;
2057 env
->spr
[SPR_DSISR
] = 0x04000000;
2061 /* eciwx or ecowx */
2062 env
->exception_index
= POWERPC_EXCP_DSI
;
2063 env
->error_code
= 0;
2064 env
->spr
[SPR_DAR
] = address
;
2066 env
->spr
[SPR_DSISR
] = 0x06100000;
2068 env
->spr
[SPR_DSISR
] = 0x04100000;
2072 printf("DSI: invalid exception (%d)\n", ret
);
2073 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
2075 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
2076 env
->spr
[SPR_DAR
] = address
;
2080 #if defined(TARGET_PPC64)
2082 /* No match in segment table */
2083 env
->exception_index
= POWERPC_EXCP_DSEG
;
2084 env
->error_code
= 0;
2085 env
->spr
[SPR_DAR
] = address
;
2091 printf("%s: set exception to %d %02x\n", __func__
,
2092 env
->exception
, env
->error_code
);
2100 /*****************************************************************************/
2101 /* BATs management */
2102 #if !defined(FLUSH_ALL_TLBS)
2103 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
2106 target_ulong base
, end
, page
;
2108 base
= BATu
& ~0x0001FFFF;
2109 end
= base
+ mask
+ 0x00020000;
2110 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
2111 TARGET_FMT_lx
")\n", base
, end
, mask
);
2112 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2113 tlb_flush_page(env
, page
);
2115 LOG_BATS("Flush done\n");
2119 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
2122 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
2123 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
2126 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2130 dump_store_bat(env
, 'I', 0, nr
, value
);
2131 if (env
->IBAT
[0][nr
] != value
) {
2132 mask
= (value
<< 15) & 0x0FFE0000UL
;
2133 #if !defined(FLUSH_ALL_TLBS)
2134 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2136 /* When storing valid upper BAT, mask BEPI and BRPN
2137 * and invalidate all TLBs covered by this BAT
2139 mask
= (value
<< 15) & 0x0FFE0000UL
;
2140 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2141 (value
& ~0x0001FFFFUL
& ~mask
);
2142 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
2143 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2144 #if !defined(FLUSH_ALL_TLBS)
2145 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2152 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2154 dump_store_bat(env
, 'I', 1, nr
, value
);
2155 env
->IBAT
[1][nr
] = value
;
2158 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2162 dump_store_bat(env
, 'D', 0, nr
, value
);
2163 if (env
->DBAT
[0][nr
] != value
) {
2164 /* When storing valid upper BAT, mask BEPI and BRPN
2165 * and invalidate all TLBs covered by this BAT
2167 mask
= (value
<< 15) & 0x0FFE0000UL
;
2168 #if !defined(FLUSH_ALL_TLBS)
2169 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2171 mask
= (value
<< 15) & 0x0FFE0000UL
;
2172 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2173 (value
& ~0x0001FFFFUL
& ~mask
);
2174 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
2175 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2176 #if !defined(FLUSH_ALL_TLBS)
2177 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2184 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2186 dump_store_bat(env
, 'D', 1, nr
, value
);
2187 env
->DBAT
[1][nr
] = value
;
2190 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2193 #if defined(FLUSH_ALL_TLBS)
2197 dump_store_bat(env
, 'I', 0, nr
, value
);
2198 if (env
->IBAT
[0][nr
] != value
) {
2199 #if defined(FLUSH_ALL_TLBS)
2202 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2203 if (env
->IBAT
[1][nr
] & 0x40) {
2204 /* Invalidate BAT only if it is valid */
2205 #if !defined(FLUSH_ALL_TLBS)
2206 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2211 /* When storing valid upper BAT, mask BEPI and BRPN
2212 * and invalidate all TLBs covered by this BAT
2214 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2215 (value
& ~0x0001FFFFUL
& ~mask
);
2216 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
2217 if (env
->IBAT
[1][nr
] & 0x40) {
2218 #if !defined(FLUSH_ALL_TLBS)
2219 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2224 #if defined(FLUSH_ALL_TLBS)
2232 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2234 #if !defined(FLUSH_ALL_TLBS)
2240 dump_store_bat(env
, 'I', 1, nr
, value
);
2241 if (env
->IBAT
[1][nr
] != value
) {
2242 #if defined(FLUSH_ALL_TLBS)
2245 if (env
->IBAT
[1][nr
] & 0x40) {
2246 #if !defined(FLUSH_ALL_TLBS)
2247 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2248 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2254 #if !defined(FLUSH_ALL_TLBS)
2255 mask
= (value
<< 17) & 0x0FFE0000UL
;
2256 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2261 env
->IBAT
[1][nr
] = value
;
2262 env
->DBAT
[1][nr
] = value
;
2263 #if defined(FLUSH_ALL_TLBS)
2271 /*****************************************************************************/
2272 /* TLB management */
2273 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
2275 switch (env
->mmu_model
) {
2276 case POWERPC_MMU_SOFT_6xx
:
2277 case POWERPC_MMU_SOFT_74xx
:
2278 ppc6xx_tlb_invalidate_all(env
);
2280 case POWERPC_MMU_SOFT_4xx
:
2281 case POWERPC_MMU_SOFT_4xx_Z
:
2282 ppc4xx_tlb_invalidate_all(env
);
2284 case POWERPC_MMU_REAL
:
2285 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2287 case POWERPC_MMU_MPC8xx
:
2289 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2291 case POWERPC_MMU_BOOKE
:
2294 case POWERPC_MMU_BOOKE206
:
2295 booke206_flush_tlb(env
, -1, 0);
2297 case POWERPC_MMU_32B
:
2298 case POWERPC_MMU_601
:
2299 #if defined(TARGET_PPC64)
2300 case POWERPC_MMU_64B
:
2301 case POWERPC_MMU_2_06
:
2302 case POWERPC_MMU_2_06d
:
2303 #endif /* defined(TARGET_PPC64) */
2308 cpu_abort(env
, "Unknown MMU model\n");
2313 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
2315 #if !defined(FLUSH_ALL_TLBS)
2316 addr
&= TARGET_PAGE_MASK
;
2317 switch (env
->mmu_model
) {
2318 case POWERPC_MMU_SOFT_6xx
:
2319 case POWERPC_MMU_SOFT_74xx
:
2320 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2321 if (env
->id_tlbs
== 1) {
2322 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2325 case POWERPC_MMU_SOFT_4xx
:
2326 case POWERPC_MMU_SOFT_4xx_Z
:
2327 ppc4xx_tlb_invalidate_virt(env
, addr
, env
->spr
[SPR_40x_PID
]);
2329 case POWERPC_MMU_REAL
:
2330 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2332 case POWERPC_MMU_MPC8xx
:
2334 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2336 case POWERPC_MMU_BOOKE
:
2338 cpu_abort(env
, "BookE MMU model is not implemented\n");
2340 case POWERPC_MMU_BOOKE206
:
2342 cpu_abort(env
, "BookE 2.06 MMU model is not implemented\n");
2344 case POWERPC_MMU_32B
:
2345 case POWERPC_MMU_601
:
2346 /* tlbie invalidate TLBs for all segments */
2347 addr
&= ~((target_ulong
)-1ULL << 28);
2348 /* XXX: this case should be optimized,
2349 * giving a mask to tlb_flush_page
2351 tlb_flush_page(env
, addr
| (0x0 << 28));
2352 tlb_flush_page(env
, addr
| (0x1 << 28));
2353 tlb_flush_page(env
, addr
| (0x2 << 28));
2354 tlb_flush_page(env
, addr
| (0x3 << 28));
2355 tlb_flush_page(env
, addr
| (0x4 << 28));
2356 tlb_flush_page(env
, addr
| (0x5 << 28));
2357 tlb_flush_page(env
, addr
| (0x6 << 28));
2358 tlb_flush_page(env
, addr
| (0x7 << 28));
2359 tlb_flush_page(env
, addr
| (0x8 << 28));
2360 tlb_flush_page(env
, addr
| (0x9 << 28));
2361 tlb_flush_page(env
, addr
| (0xA << 28));
2362 tlb_flush_page(env
, addr
| (0xB << 28));
2363 tlb_flush_page(env
, addr
| (0xC << 28));
2364 tlb_flush_page(env
, addr
| (0xD << 28));
2365 tlb_flush_page(env
, addr
| (0xE << 28));
2366 tlb_flush_page(env
, addr
| (0xF << 28));
2368 #if defined(TARGET_PPC64)
2369 case POWERPC_MMU_64B
:
2370 case POWERPC_MMU_2_06
:
2371 case POWERPC_MMU_2_06d
:
2372 /* tlbie invalidate TLBs for all segments */
2373 /* XXX: given the fact that there are too many segments to invalidate,
2374 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2375 * we just invalidate all TLBs
2379 #endif /* defined(TARGET_PPC64) */
2382 cpu_abort(env
, "Unknown MMU model\n");
2386 ppc_tlb_invalidate_all(env
);
2390 /*****************************************************************************/
2391 /* Special registers manipulation */
2392 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
2394 LOG_MMU("%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2395 if (env
->spr
[SPR_SDR1
] != value
) {
2396 env
->spr
[SPR_SDR1
] = value
;
2397 #if defined(TARGET_PPC64)
2398 if (env
->mmu_model
& POWERPC_MMU_64
) {
2399 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
2401 if (htabsize
> 28) {
2402 fprintf(stderr
, "Invalid HTABSIZE 0x" TARGET_FMT_lx
2403 " stored in SDR1\n", htabsize
);
2406 env
->htab_mask
= (1ULL << (htabsize
+ 18)) - 1;
2407 env
->htab_base
= value
& SDR_64_HTABORG
;
2409 #endif /* defined(TARGET_PPC64) */
2411 /* FIXME: Should check for valid HTABMASK values */
2412 env
->htab_mask
= ((value
& SDR_32_HTABMASK
) << 16) | 0xFFFF;
2413 env
->htab_base
= value
& SDR_32_HTABORG
;
2419 /* Segment registers load and store */
2420 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2422 #if defined(TARGET_PPC64)
2423 if (env
->mmu_model
& POWERPC_MMU_64
) {
2428 return env
->sr
[sr_num
];
2431 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2433 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2434 (int)srnum
, value
, env
->sr
[srnum
]);
2435 #if defined(TARGET_PPC64)
2436 if (env
->mmu_model
& POWERPC_MMU_64
) {
2437 uint64_t rb
= 0, rs
= 0;
2440 rb
|= ((uint32_t)srnum
& 0xf) << 28;
2441 /* Set the valid bit */
2444 rb
|= (uint32_t)srnum
;
2447 rs
|= (value
& 0xfffffff) << 12;
2449 rs
|= ((value
>> 27) & 0xf) << 8;
2451 ppc_store_slb(env
, rb
, rs
);
2454 if (env
->sr
[srnum
] != value
) {
2455 env
->sr
[srnum
] = value
;
2456 /* Invalidating 256MB of virtual memory in 4kB pages is way longer than
2457 flusing the whole TLB. */
2458 #if !defined(FLUSH_ALL_TLBS) && 0
2460 target_ulong page
, end
;
2461 /* Invalidate 256 MB of virtual memory */
2462 page
= (16 << 20) * srnum
;
2463 end
= page
+ (16 << 20);
2464 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2465 tlb_flush_page(env
, page
);
2473 #endif /* !defined(CONFIG_USER_ONLY) */
2475 #if !defined(CONFIG_USER_ONLY)
2476 /* SLB management */
2477 #if defined(TARGET_PPC64)
2478 void helper_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
2480 if (ppc_store_slb(env
, rb
, rs
) < 0) {
2481 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2482 POWERPC_EXCP_INVAL
);
2486 target_ulong
helper_load_slb_esid(CPUPPCState
*env
, target_ulong rb
)
2488 target_ulong rt
= 0;
2490 if (ppc_load_slb_esid(env
, rb
, &rt
) < 0) {
2491 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2492 POWERPC_EXCP_INVAL
);
2497 target_ulong
helper_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
)
2499 target_ulong rt
= 0;
2501 if (ppc_load_slb_vsid(env
, rb
, &rt
) < 0) {
2502 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2503 POWERPC_EXCP_INVAL
);
2507 #endif /* defined(TARGET_PPC64) */
2509 /* TLB management */
2510 void helper_tlbia(CPUPPCState
*env
)
2512 ppc_tlb_invalidate_all(env
);
2515 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2517 ppc_tlb_invalidate_one(env
, addr
);
2520 /* Software driven TLBs management */
2521 /* PowerPC 602/603 software TLB load instructions helpers */
2522 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2524 target_ulong RPN
, CMP
, EPN
;
2527 RPN
= env
->spr
[SPR_RPA
];
2529 CMP
= env
->spr
[SPR_ICMP
];
2530 EPN
= env
->spr
[SPR_IMISS
];
2532 CMP
= env
->spr
[SPR_DCMP
];
2533 EPN
= env
->spr
[SPR_DMISS
];
2535 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2536 (void)EPN
; /* avoid a compiler warning */
2537 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2538 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2540 /* Store this TLB */
2541 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2542 way
, is_code
, CMP
, RPN
);
2545 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2547 do_6xx_tlb(env
, EPN
, 0);
2550 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2552 do_6xx_tlb(env
, EPN
, 1);
2555 /* PowerPC 74xx software TLB load instructions helpers */
2556 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2558 target_ulong RPN
, CMP
, EPN
;
2561 RPN
= env
->spr
[SPR_PTELO
];
2562 CMP
= env
->spr
[SPR_PTEHI
];
2563 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2564 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2565 (void)EPN
; /* avoid a compiler warning */
2566 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2567 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2569 /* Store this TLB */
2570 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2571 way
, is_code
, CMP
, RPN
);
2574 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2576 do_74xx_tlb(env
, EPN
, 0);
2579 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2581 do_74xx_tlb(env
, EPN
, 1);
2584 /*****************************************************************************/
2585 /* PowerPC 601 specific instructions (POWER bridge) */
2587 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2591 target_ulong ret
= 0;
2593 /* We don't have to generate many instances of this instruction,
2594 * as rac is supervisor only.
2596 /* XXX: FIX THIS: Pretend we have no BAT */
2597 nb_BATs
= env
->nb_BATs
;
2599 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2602 env
->nb_BATs
= nb_BATs
;
2606 static inline target_ulong
booke_tlb_to_page_size(int size
)
2608 return 1024 << (2 * size
);
2611 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2615 switch (page_size
) {
2649 #if defined(TARGET_PPC64)
2650 case 0x000100000000ULL
:
2653 case 0x000400000000ULL
:
2656 case 0x001000000000ULL
:
2659 case 0x004000000000ULL
:
2662 case 0x010000000000ULL
:
2674 /* Helpers for 4xx TLB management */
2675 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2677 #define PPC4XX_TLBHI_V 0x00000040
2678 #define PPC4XX_TLBHI_E 0x00000020
2679 #define PPC4XX_TLBHI_SIZE_MIN 0
2680 #define PPC4XX_TLBHI_SIZE_MAX 7
2681 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2682 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2683 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2685 #define PPC4XX_TLBLO_EX 0x00000200
2686 #define PPC4XX_TLBLO_WR 0x00000100
2687 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2688 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2690 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2696 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2697 tlb
= &env
->tlb
.tlbe
[entry
];
2699 if (tlb
->prot
& PAGE_VALID
) {
2700 ret
|= PPC4XX_TLBHI_V
;
2702 size
= booke_page_size_to_tlb(tlb
->size
);
2703 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2704 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2706 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2707 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2711 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2716 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2717 tlb
= &env
->tlb
.tlbe
[entry
];
2719 if (tlb
->prot
& PAGE_EXEC
) {
2720 ret
|= PPC4XX_TLBLO_EX
;
2722 if (tlb
->prot
& PAGE_WRITE
) {
2723 ret
|= PPC4XX_TLBLO_WR
;
2728 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2732 target_ulong page
, end
;
2734 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2736 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2737 tlb
= &env
->tlb
.tlbe
[entry
];
2738 /* Invalidate previous TLB (if it's valid) */
2739 if (tlb
->prot
& PAGE_VALID
) {
2740 end
= tlb
->EPN
+ tlb
->size
;
2741 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2742 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2743 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2744 tlb_flush_page(env
, page
);
2747 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2748 & PPC4XX_TLBHI_SIZE_MASK
);
2749 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2750 * If this ever occurs, one should use the ppcemb target instead
2751 * of the ppc or ppc64 one
2753 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2754 cpu_abort(env
, "TLB size " TARGET_FMT_lu
" < %u "
2755 "are not supported (%d)\n",
2756 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2758 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2759 if (val
& PPC4XX_TLBHI_V
) {
2760 tlb
->prot
|= PAGE_VALID
;
2761 if (val
& PPC4XX_TLBHI_E
) {
2762 /* XXX: TO BE FIXED */
2764 "Little-endian TLB entries are not supported by now\n");
2767 tlb
->prot
&= ~PAGE_VALID
;
2769 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2770 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2771 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2772 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2773 tlb
->prot
& PAGE_READ
? 'r' : '-',
2774 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2775 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2776 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2777 /* Invalidate new TLB (if valid) */
2778 if (tlb
->prot
& PAGE_VALID
) {
2779 end
= tlb
->EPN
+ tlb
->size
;
2780 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2781 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2782 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2783 tlb_flush_page(env
, page
);
2788 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2793 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2795 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2796 tlb
= &env
->tlb
.tlbe
[entry
];
2797 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2798 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2799 tlb
->prot
= PAGE_READ
;
2800 if (val
& PPC4XX_TLBLO_EX
) {
2801 tlb
->prot
|= PAGE_EXEC
;
2803 if (val
& PPC4XX_TLBLO_WR
) {
2804 tlb
->prot
|= PAGE_WRITE
;
2806 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2807 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2808 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2809 tlb
->prot
& PAGE_READ
? 'r' : '-',
2810 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2811 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2812 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2815 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2817 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2820 /* PowerPC 440 TLB management */
/* Write one word of a PowerPC 440 software TLB entry.
 *
 * word selects which part of the entry is updated:
 *   0 - EPN, page size, attribute bit 0 and valid bit; the PID is latched
 *       from MMUCR.  The QEMU TLB is flushed when an already-valid
 *       mapping's EPN, size or validity changes.
 *   1 - RPN; flushes when an already-valid entry's RPN changes.
 *   2 - storage attributes and user/supervisor permission bits.
 */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;                           /* 64-entry TLB on the 440 */
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        /* Attribute bit 0 lives in this word; the rest come from word 2 */
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The translation ID is latched from MMUCR at write time */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs) {
            tlb_flush(env, 1);
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env, 1);
        }
        tlb->RPN = RPN;
        break;
    case 2:
        /* Keep attribute bit 0 (set via word 0), replace the rest */
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Keep only the valid bit, then rebuild the permissions:
         * PAGE_* << 4 holds one privilege level's RWX, PAGE_* the other.
         * NOTE(review): the selecting bits of 'value' for each permission
         * were elided in this extract and are reconstructed - confirm
         * against the 440 TLB word 2 layout.
         */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
/* Read one word of a PowerPC 440 software TLB entry, the inverse of
 * helper_440_tlbwe().  Reading word 0 also copies the entry's PID back
 * into the low byte of MMUCR, as the hardware does.
 */
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;                           /* 64-entry TLB on the 440 */
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            /* NOTE(review): fallback size for unencodable page sizes was
             * elided in this extract - reconstructed as 1 (4 KiB). */
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        /* Mirror the entry's PID into MMUCR[TID] */
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        /* Attribute bit 0 is reported via word 0, not here */
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
2950 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2952 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
2955 /* PowerPC BookE 2.06 TLB management */
2957 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2959 uint32_t tlbncfg
= 0;
2960 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2961 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
2964 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2965 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
2967 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
2968 cpu_abort(env
, "we don't support HES yet\n");
2971 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
2974 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
2976 env
->spr
[pidn
] = pid
;
2977 /* changing PIDs mean we're in a different address space now */
/* tlbwe for BookE 2.06 (e500-style MAS registers): write the entry
 * selected by MAS0/MAS2 from the MAS1/MAS2/MAS3/MAS7 contents, then
 * invalidate any stale QEMU translations for the affected range.
 */
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        /* selection out of range: illegal-instruction program exception */
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    }
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if (!(tlbncfg & TLBnCFG_AVAIL)) {
        /* force !AVAIL TLB entries to correct page size */
        tlb->mas1 &= ~MAS1_TSIZE_MASK;
        /* XXX can be configured in MMUCSR0 */
        tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /* Executing a tlbwe instruction in 32-bit mode will set
         * bits 0:31 of the TLB EPN field to zero.
         * NOTE(review): the msr_cm guard was elided in this extract and
         * is reconstructed - confirm against the original source.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        /* page-sized mapping: a targeted flush is sufficient */
        tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
    } else {
        /* larger mapping: flush everything */
        tlb_flush(env, 1);
    }
}
3076 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
3078 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
3079 int way
= booke206_tlbm_to_way(env
, tlb
);
3081 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
3082 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
3083 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3085 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
3086 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
3087 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
3088 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
/* tlbre: read the MAS-selected TLB entry back into the MAS registers.
 * An out-of-range selection reads back as invalid (MAS1 cleared).
 */
void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        /* NOTE(review): this if/else guard was elided in the extract and
         * is reconstructed - confirm against the original source. */
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}
/* tlbsx: search every TLB array for a translation of 'address' matching
 * the PID and address-space bit held in MAS6.  On a hit the entry is
 * copied into the MAS registers; on a miss the MAS registers are filled
 * with the MAS4 defaults plus next-victim information, ready for a tlbwe.
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* skip entries that don't translate this address/PID */
            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }
            /* address space (TS) must also match MAS6[SAS] */
            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }
            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    /* propagate the searched PID into MAS1[TID] */
    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
/* Clear the valid bit of every non-IPROT entry in TLB array 'tlbn' whose
 * EPN covers effective address 'ea'.  Entries marked IPROT (invalidation
 * protected) are left untouched.  Callers must flush the QEMU TLB.
 */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        /* compare only the EPN bits significant for this entry's size */
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
/* tlbivax: invalidate TLB entries by effective address.  Bit 2 (0x4) of
 * the EA requests "invalidate all"; bit 3 (0x8) selects TLB1 over TLB0.
 */
void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        /* TLB1 pages may be larger than TARGET_PAGE_SIZE, so flush all.
         * NOTE(review): this flush line was elided in the extract and is
         * reconstructed - confirm against the original source. */
        tlb_flush(env, 1);
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        tlb_flush_page(env, address & MAS2_EPN_MASK);
    }
}
3202 void helper_booke206_tlbilx0(CPUPPCState
*env
, target_ulong address
)
3204 /* XXX missing LPID handling */
3205 booke206_flush_tlb(env
, -1, 1);
/* tlbilx type 1: invalidate every non-IPROT entry whose TID matches the
 * PID in MAS6[SPID].  MAS6[SPID] and MAS1[TID] sit at the same bit
 * position, so the fields compare directly without shifting.
 */
void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        /* the per-array entries are laid out consecutively in tlbm */
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env, 1);
}
/* tlbilx type 3: invalidate by PID and effective address, additionally
 * qualified by the indirect (IND) bit from MAS6 and the guest-space bit
 * from MAS5.  IPROT-protected entries are never invalidated.
 */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            /* skip entries that don't match the ea/pid/ind/gs criteria */
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env, 1);
}
3269 void helper_booke206_tlbflush(CPUPPCState
*env
, uint32_t type
)
3274 flags
|= BOOKE206_FLUSH_TLB1
;
3278 flags
|= BOOKE206_FLUSH_TLB0
;
3281 booke206_flush_tlb(env
, flags
, 1);