/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "sysemu/kvm.h"
27 //#define DEBUG_SOFTWARE_TLB
28 //#define DUMP_PAGE_TABLES
29 //#define DEBUG_SOFTWARE_TLB
30 //#define FLUSH_ALL_TLBS
33 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
34 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
36 # define LOG_MMU(...) do { } while (0)
37 # define LOG_MMU_STATE(...) do { } while (0)
40 #ifdef DEBUG_SOFTWARE_TLB
41 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
43 # define LOG_SWTLB(...) do { } while (0)
47 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
49 # define LOG_BATS(...) do { } while (0)
53 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
55 # define LOG_SLB(...) do { } while (0)
58 /*****************************************************************************/
59 /* PowerPC MMU emulation */
60 #if defined(CONFIG_USER_ONLY)
61 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
64 int exception
, error_code
;
67 exception
= POWERPC_EXCP_ISI
;
68 error_code
= 0x40000000;
70 exception
= POWERPC_EXCP_DSI
;
71 error_code
= 0x40000000;
73 error_code
|= 0x02000000;
75 env
->spr
[SPR_DAR
] = address
;
76 env
->spr
[SPR_DSISR
] = error_code
;
78 env
->exception_index
= exception
;
79 env
->error_code
= error_code
;
85 /* Common routines used by software and hardware TLBs emulation */
86 static inline int pte_is_valid(target_ulong pte0
)
88 return pte0
& 0x80000000 ? 1 : 0;
91 static inline void pte_invalidate(target_ulong
*pte0
)
96 #if defined(TARGET_PPC64)
97 static inline int pte64_is_valid(target_ulong pte0
)
99 return pte0
& 0x0000000000000001ULL
? 1 : 0;
103 #define PTE_PTEM_MASK 0x7FFFFFBF
104 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
105 #if defined(TARGET_PPC64)
106 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
107 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
110 static inline int pp_check(int key
, int pp
, int nx
)
114 /* Compute access rights */
115 /* When pp is 3/7, the result is undefined. Set it to noaccess */
122 access
|= PAGE_WRITE
;
140 access
= PAGE_READ
| PAGE_WRITE
;
151 static inline int check_prot(int prot
, int rw
, int access_type
)
155 if (access_type
== ACCESS_CODE
) {
156 if (prot
& PAGE_EXEC
) {
162 if (prot
& PAGE_WRITE
) {
168 if (prot
& PAGE_READ
) {
178 static inline int pte_check(mmu_ctx_t
*ctx
, int is_64b
, target_ulong pte0
,
179 target_ulong pte1
, int h
, int rw
, int type
)
181 target_ulong ptem
, mmask
;
182 int access
, ret
, pteh
, ptev
, pp
;
185 /* Check validity and table match */
186 #if defined(TARGET_PPC64)
188 ptev
= pte64_is_valid(pte0
);
189 pteh
= (pte0
>> 1) & 1;
193 ptev
= pte_is_valid(pte0
);
194 pteh
= (pte0
>> 6) & 1;
196 if (ptev
&& h
== pteh
) {
197 /* Check vsid & api */
198 #if defined(TARGET_PPC64)
200 ptem
= pte0
& PTE64_PTEM_MASK
;
201 mmask
= PTE64_CHECK_MASK
;
202 pp
= (pte1
& 0x00000003) | ((pte1
>> 61) & 0x00000004);
203 ctx
->nx
= (pte1
>> 2) & 1; /* No execute bit */
204 ctx
->nx
|= (pte1
>> 3) & 1; /* Guarded bit */
208 ptem
= pte0
& PTE_PTEM_MASK
;
209 mmask
= PTE_CHECK_MASK
;
210 pp
= pte1
& 0x00000003;
212 if (ptem
== ctx
->ptem
) {
213 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
214 /* all matches should have equal RPN, WIMG & PP */
215 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
216 qemu_log("Bad RPN/WIMG/PP\n");
220 /* Compute access rights */
221 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
222 /* Keep the matching PTE informations */
225 ret
= check_prot(ctx
->prot
, rw
, type
);
228 LOG_MMU("PTE access granted !\n");
230 /* Access right violation */
231 LOG_MMU("PTE access rejected\n");
239 static inline int pte32_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
240 target_ulong pte1
, int h
, int rw
, int type
)
242 return pte_check(ctx
, 0, pte0
, pte1
, h
, rw
, type
);
245 #if defined(TARGET_PPC64)
246 static inline int pte64_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
247 target_ulong pte1
, int h
, int rw
, int type
)
249 return pte_check(ctx
, 1, pte0
, pte1
, h
, rw
, type
);
253 static inline int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
258 /* Update page flags */
259 if (!(*pte1p
& 0x00000100)) {
260 /* Update accessed flag */
261 *pte1p
|= 0x00000100;
264 if (!(*pte1p
& 0x00000080)) {
265 if (rw
== 1 && ret
== 0) {
266 /* Update changed flag */
267 *pte1p
|= 0x00000080;
270 /* Force page fault for first write access */
271 ctx
->prot
&= ~PAGE_WRITE
;
278 /* Software driven TLB helpers */
279 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
280 int way
, int is_code
)
284 /* Select TLB num in a way from address */
285 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
287 nr
+= env
->tlb_per_way
* way
;
288 /* 6xx have separate TLBs for instructions and data */
289 if (is_code
&& env
->id_tlbs
== 1) {
296 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
301 /* LOG_SWTLB("Invalidate all TLBs\n"); */
302 /* Invalidate all defined software TLB */
304 if (env
->id_tlbs
== 1) {
307 for (nr
= 0; nr
< max
; nr
++) {
308 tlb
= &env
->tlb
.tlb6
[nr
];
309 pte_invalidate(&tlb
->pte0
);
314 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
316 int is_code
, int match_epn
)
318 #if !defined(FLUSH_ALL_TLBS)
322 /* Invalidate ITLB + DTLB, all ways */
323 for (way
= 0; way
< env
->nb_ways
; way
++) {
324 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
325 tlb
= &env
->tlb
.tlb6
[nr
];
326 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
327 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
329 pte_invalidate(&tlb
->pte0
);
330 tlb_flush_page(env
, tlb
->EPN
);
334 /* XXX: PowerPC specification say this is valid as well */
335 ppc6xx_tlb_invalidate_all(env
);
339 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
340 target_ulong eaddr
, int is_code
)
342 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
345 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
346 int is_code
, target_ulong pte0
, target_ulong pte1
)
351 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
352 tlb
= &env
->tlb
.tlb6
[nr
];
353 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
354 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
355 /* Invalidate any pending reference in QEMU for this virtual address */
356 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
360 /* Store last way for LRU mechanism */
364 static inline int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
365 target_ulong eaddr
, int rw
, int access_type
)
372 ret
= -1; /* No TLB found */
373 for (way
= 0; way
< env
->nb_ways
; way
++) {
374 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
375 access_type
== ACCESS_CODE
? 1 : 0);
376 tlb
= &env
->tlb
.tlb6
[nr
];
377 /* This test "emulates" the PTE index match for hardware TLBs */
378 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
379 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
380 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
381 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
382 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
385 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
386 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
387 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
388 tlb
->EPN
, eaddr
, tlb
->pte1
,
389 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
390 switch (pte32_check(ctx
, tlb
->pte0
, tlb
->pte1
, 0, rw
, access_type
)) {
392 /* TLB inconsistency */
395 /* Access violation */
405 /* XXX: we should go on looping to check all TLBs consistency
406 * but we can speed-up the whole thing as the
407 * result would be undefined if TLBs are not consistent.
416 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
417 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
418 /* Update page flags */
419 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
425 /* Perform BAT hit & translation */
426 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
427 int *validp
, int *protp
, target_ulong
*BATu
,
433 bl
= (*BATu
& 0x00001FFC) << 15;
436 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
437 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
439 pp
= *BATl
& 0x00000003;
441 prot
= PAGE_READ
| PAGE_EXEC
;
452 static inline void bat_601_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
453 int *validp
, int *protp
,
454 target_ulong
*BATu
, target_ulong
*BATl
)
457 int key
, pp
, valid
, prot
;
459 bl
= (*BATl
& 0x0000003F) << 17;
460 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx
" msk " TARGET_FMT_lx
"\n",
461 (uint8_t)(*BATl
& 0x0000003F), bl
, ~bl
);
463 valid
= (*BATl
>> 6) & 1;
465 pp
= *BATu
& 0x00000003;
467 key
= (*BATu
>> 3) & 1;
469 key
= (*BATu
>> 2) & 1;
471 prot
= pp_check(key
, pp
, 0);
478 static inline int get_bat(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
479 target_ulong
virtual, int rw
, int type
)
481 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
482 target_ulong BEPIl
, BEPIu
, bl
;
486 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
487 type
== ACCESS_CODE
? 'I' : 'D', virtual);
490 BATlt
= env
->IBAT
[1];
491 BATut
= env
->IBAT
[0];
494 BATlt
= env
->DBAT
[1];
495 BATut
= env
->DBAT
[0];
498 for (i
= 0; i
< env
->nb_BATs
; i
++) {
501 BEPIu
= *BATu
& 0xF0000000;
502 BEPIl
= *BATu
& 0x0FFE0000;
503 if (unlikely(env
->mmu_model
== POWERPC_MMU_601
)) {
504 bat_601_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
506 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
508 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
509 " BATl " TARGET_FMT_lx
"\n", __func__
,
510 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
511 if ((virtual & 0xF0000000) == BEPIu
&&
512 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
515 /* Get physical address */
516 ctx
->raddr
= (*BATl
& 0xF0000000) |
517 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
518 (virtual & 0x0001F000);
519 /* Compute access rights */
521 ret
= check_prot(ctx
->prot
, rw
, type
);
523 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
524 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
525 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
532 #if defined(DEBUG_BATS)
533 if (qemu_log_enabled()) {
534 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
535 for (i
= 0; i
< 4; i
++) {
538 BEPIu
= *BATu
& 0xF0000000;
539 BEPIl
= *BATu
& 0x0FFE0000;
540 bl
= (*BATu
& 0x00001FFC) << 15;
541 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
542 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
543 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
544 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
545 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
554 static inline hwaddr
get_pteg_offset(CPUPPCState
*env
,
558 return (hash
* pte_size
* 8) & env
->htab_mask
;
561 /* PTE table lookup */
562 static inline int find_pte2(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int is_64b
, int h
,
563 int rw
, int type
, int target_page_bits
)
566 target_ulong pte0
, pte1
;
570 ret
= -1; /* No entry found */
571 pteg_off
= get_pteg_offset(env
, ctx
->hash
[h
],
572 is_64b
? HASH_PTE_SIZE_64
: HASH_PTE_SIZE_32
);
573 for (i
= 0; i
< 8; i
++) {
574 #if defined(TARGET_PPC64)
576 if (env
->external_htab
) {
577 pte0
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16));
578 pte1
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16) + 8);
580 pte0
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16));
581 pte1
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16) + 8);
584 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
585 LOG_MMU("Load pte from %016" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
586 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
587 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
588 (int)((pte0
>> 1) & 1), ctx
->ptem
);
592 if (env
->external_htab
) {
593 pte0
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8));
594 pte1
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8) + 4);
596 pte0
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8));
597 pte1
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8) + 4);
599 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
600 LOG_MMU("Load pte from %08" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
601 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
602 pteg_off
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
603 (int)((pte0
>> 6) & 1), ctx
->ptem
);
607 /* PTE inconsistency */
610 /* Access violation */
620 /* XXX: we should go on looping to check all PTEs consistency
621 * but if we can speed-up the whole thing as the
622 * result would be undefined if PTEs are not consistent.
631 LOG_MMU("found PTE at addr %08" HWADDR_PRIx
" prot=%01x ret=%d\n",
632 ctx
->raddr
, ctx
->prot
, ret
);
633 /* Update page flags */
635 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
636 #if defined(TARGET_PPC64)
638 if (env
->external_htab
) {
639 stq_p(env
->external_htab
+ pteg_off
+ (good
* 16) + 8,
642 stq_phys_notdirty(env
->htab_base
+ pteg_off
+
643 (good
* 16) + 8, pte1
);
648 if (env
->external_htab
) {
649 stl_p(env
->external_htab
+ pteg_off
+ (good
* 8) + 4,
652 stl_phys_notdirty(env
->htab_base
+ pteg_off
+
653 (good
* 8) + 4, pte1
);
659 /* We have a TLB that saves 4K pages, so let's
660 * split a huge page to 4k chunks */
661 if (target_page_bits
!= TARGET_PAGE_BITS
) {
662 ctx
->raddr
|= (ctx
->eaddr
& ((1 << target_page_bits
) - 1))
668 static inline int find_pte(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
669 int type
, int target_page_bits
)
671 #if defined(TARGET_PPC64)
672 if (env
->mmu_model
& POWERPC_MMU_64
) {
673 return find_pte2(env
, ctx
, 1, h
, rw
, type
, target_page_bits
);
677 return find_pte2(env
, ctx
, 0, h
, rw
, type
, target_page_bits
);
680 #if defined(TARGET_PPC64)
681 static inline ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
683 uint64_t esid_256M
, esid_1T
;
686 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
688 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
689 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
691 for (n
= 0; n
< env
->slb_nr
; n
++) {
692 ppc_slb_t
*slb
= &env
->slb
[n
];
694 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
695 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
696 /* We check for 1T matches on all MMUs here - if the MMU
697 * doesn't have 1T segment support, we will have prevented 1T
698 * entries from being inserted in the slbmte code. */
699 if (((slb
->esid
== esid_256M
) &&
700 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
701 || ((slb
->esid
== esid_1T
) &&
702 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
710 /*****************************************************************************/
713 void helper_slbia(CPUPPCState
*env
)
715 int n
, do_invalidate
;
718 /* XXX: Warning: slbia never invalidates the first segment */
719 for (n
= 1; n
< env
->slb_nr
; n
++) {
720 ppc_slb_t
*slb
= &env
->slb
[n
];
722 if (slb
->esid
& SLB_ESID_V
) {
723 slb
->esid
&= ~SLB_ESID_V
;
724 /* XXX: given the fact that segment size is 256 MB or 1TB,
725 * and we still don't have a tlb_flush_mask(env, n, mask)
726 * in QEMU, we just invalidate all TLBs
736 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
740 slb
= slb_lookup(env
, addr
);
745 if (slb
->esid
& SLB_ESID_V
) {
746 slb
->esid
&= ~SLB_ESID_V
;
748 /* XXX: given the fact that segment size is 256 MB or 1TB,
749 * and we still don't have a tlb_flush_mask(env, n, mask)
750 * in QEMU, we just invalidate all TLBs
756 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
758 int slot
= rb
& 0xfff;
759 ppc_slb_t
*slb
= &env
->slb
[slot
];
761 if (rb
& (0x1000 - env
->slb_nr
)) {
762 return -1; /* Reserved bits set or slot too high */
764 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
765 return -1; /* Bad segment size */
767 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
768 return -1; /* 1T segment on MMU that doesn't support it */
771 /* Mask out the slot number as we store the entry */
772 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
775 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
776 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
777 slb
->esid
, slb
->vsid
);
782 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
785 int slot
= rb
& 0xfff;
786 ppc_slb_t
*slb
= &env
->slb
[slot
];
788 if (slot
>= env
->slb_nr
) {
796 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
799 int slot
= rb
& 0xfff;
800 ppc_slb_t
*slb
= &env
->slb
[slot
];
802 if (slot
>= env
->slb_nr
) {
809 #endif /* defined(TARGET_PPC64) */
811 /* Perform segment based translation */
812 static inline int get_segment(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
813 target_ulong eaddr
, int rw
, int type
)
817 int ds
, pr
, target_page_bits
;
822 #if defined(TARGET_PPC64)
823 if (env
->mmu_model
& POWERPC_MMU_64
) {
825 target_ulong pageaddr
;
828 LOG_MMU("Check SLBs\n");
829 slb
= slb_lookup(env
, eaddr
);
834 if (slb
->vsid
& SLB_VSID_B
) {
835 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
838 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
842 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
843 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
844 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
845 : (slb
->vsid
& SLB_VSID_KS
));
847 ctx
->nx
= !!(slb
->vsid
& SLB_VSID_N
);
849 pageaddr
= eaddr
& ((1ULL << segment_bits
)
850 - (1ULL << target_page_bits
));
851 if (slb
->vsid
& SLB_VSID_B
) {
852 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
854 hash
= vsid
^ (pageaddr
>> target_page_bits
);
856 /* Only 5 bits of the page index are used in the AVPN */
857 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
858 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
860 #endif /* defined(TARGET_PPC64) */
862 target_ulong sr
, pgidx
;
864 sr
= env
->sr
[eaddr
>> 28];
865 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
866 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
867 ds
= sr
& 0x80000000 ? 1 : 0;
868 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
869 vsid
= sr
& 0x00FFFFFF;
870 target_page_bits
= TARGET_PAGE_BITS
;
871 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
872 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
873 " ir=%d dr=%d pr=%d %d t=%d\n",
874 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
875 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
876 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
878 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
880 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
881 ctx
->key
, ds
, ctx
->nx
, vsid
);
884 /* Check if instruction fetch is allowed, if needed */
885 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
886 /* Page address translation */
887 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
888 " hash " TARGET_FMT_plx
"\n",
889 env
->htab_base
, env
->htab_mask
, hash
);
891 ctx
->hash
[1] = ~hash
;
893 /* Initialize real address with an invalid value */
894 ctx
->raddr
= (hwaddr
)-1ULL;
895 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
896 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
897 /* Software TLB search */
898 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
900 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
901 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
902 " hash=" TARGET_FMT_plx
"\n",
903 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
905 /* Primary table lookup */
906 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
908 /* Secondary table lookup */
909 if (eaddr
!= 0xEFFFFFFF) {
910 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
911 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
912 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
913 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
915 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
922 #if defined(DUMP_PAGE_TABLES)
923 if (qemu_log_enabled()) {
925 uint32_t a0
, a1
, a2
, a3
;
927 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
928 "\n", sdr
, mask
+ 0x80);
929 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
931 a0
= ldl_phys(curaddr
);
932 a1
= ldl_phys(curaddr
+ 4);
933 a2
= ldl_phys(curaddr
+ 8);
934 a3
= ldl_phys(curaddr
+ 12);
935 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
936 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
937 curaddr
, a0
, a1
, a2
, a3
);
943 LOG_MMU("No access allowed\n");
949 LOG_MMU("direct store...\n");
950 /* Direct-store segment : absolutely *BUGGY* for now */
952 /* Direct-store implies a 32-bit MMU.
953 * Check the Segment Register's bus unit ID (BUID).
955 sr
= env
->sr
[eaddr
>> 28];
956 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
957 /* Memory-forced I/O controller interface access */
958 /* If T=1 and BUID=x'07F', the 601 performs a memory access
959 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
961 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
962 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
968 /* Integer load/store : only access allowed */
971 /* No code fetch is allowed in direct-store areas */
974 /* Floating point load/store */
977 /* lwarx, ldarx or srwcx. */
980 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
981 /* Should make the instruction do no-op.
982 * As it already do no-op, it's quite easy :-)
990 qemu_log("ERROR: instruction should not need "
991 "address translation\n");
994 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1005 /* Generic TLB check function for embedded PowerPC implementations */
1006 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1008 target_ulong address
, uint32_t pid
, int ext
,
1013 /* Check valid flag */
1014 if (!(tlb
->prot
& PAGE_VALID
)) {
1017 mask
= ~(tlb
->size
- 1);
1018 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1019 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1020 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
1022 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
1025 /* Check effective address */
1026 if ((address
& mask
) != tlb
->EPN
) {
1029 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1031 /* Extend the physical address to 36 bits */
1032 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
1038 /* Generic TLB search function for PowerPC embedded implementations */
1039 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
1046 /* Default return value is no match */
1048 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1049 tlb
= &env
->tlb
.tlbe
[i
];
1050 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1059 /* Helpers specific to PowerPC 40x implementations */
1060 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
1065 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1066 tlb
= &env
->tlb
.tlbe
[i
];
1067 tlb
->prot
&= ~PAGE_VALID
;
1072 static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState
*env
,
1073 target_ulong eaddr
, uint32_t pid
)
1075 #if !defined(FLUSH_ALL_TLBS)
1078 target_ulong page
, end
;
1081 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1082 tlb
= &env
->tlb
.tlbe
[i
];
1083 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1084 end
= tlb
->EPN
+ tlb
->size
;
1085 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
1086 tlb_flush_page(env
, page
);
1088 tlb
->prot
&= ~PAGE_VALID
;
1093 ppc4xx_tlb_invalidate_all(env
);
1097 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1098 target_ulong address
, int rw
,
1103 int i
, ret
, zsel
, zpr
, pr
;
1106 raddr
= (hwaddr
)-1ULL;
1108 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1109 tlb
= &env
->tlb
.tlbe
[i
];
1110 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1111 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
1114 zsel
= (tlb
->attr
>> 4) & 0xF;
1115 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
1116 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1117 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1118 /* Check execute enable bit */
1126 /* All accesses granted */
1127 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1132 /* Raise Zone protection fault. */
1133 env
->spr
[SPR_40x_ESR
] = 1 << 22;
1141 /* Check from TLB entry */
1142 ctx
->prot
= tlb
->prot
;
1143 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1145 env
->spr
[SPR_40x_ESR
] = 0;
1151 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1152 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1157 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1158 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1163 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
1165 /* XXX: TO BE FIXED */
1166 if (val
!= 0x00000000) {
1167 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1169 env
->spr
[SPR_405_SLER
] = val
;
1172 static inline int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1173 hwaddr
*raddr
, int *prot
,
1174 target_ulong address
, int rw
,
1175 int access_type
, int i
)
1179 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1180 env
->spr
[SPR_BOOKE_PID
],
1181 !env
->nb_pids
, i
) >= 0) {
1185 if (env
->spr
[SPR_BOOKE_PID1
] &&
1186 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1187 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
1191 if (env
->spr
[SPR_BOOKE_PID2
] &&
1192 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1193 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
1197 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1203 prot2
= tlb
->prot
& 0xF;
1205 prot2
= (tlb
->prot
>> 4) & 0xF;
1208 /* Check the address space */
1209 if (access_type
== ACCESS_CODE
) {
1210 if (msr_ir
!= (tlb
->attr
& 1)) {
1211 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1216 if (prot2
& PAGE_EXEC
) {
1217 LOG_SWTLB("%s: good TLB!\n", __func__
);
1221 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1224 if (msr_dr
!= (tlb
->attr
& 1)) {
1225 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1230 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1231 LOG_SWTLB("%s: found TLB!\n", __func__
);
1235 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1242 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1243 target_ulong address
, int rw
,
1251 raddr
= (hwaddr
)-1ULL;
1252 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1253 tlb
= &env
->tlb
.tlbe
[i
];
1254 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
1263 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1264 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1267 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1268 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1274 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
1275 const int check_iprot
)
1279 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
1281 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1282 if (flags
& (1 << i
)) {
1283 tlb_size
= booke206_tlb_size(env
, i
);
1284 for (j
= 0; j
< tlb_size
; j
++) {
1285 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
1286 tlb
[j
].mas1
&= ~MAS1_VALID
;
1290 tlb
+= booke206_tlb_size(env
, i
);
1296 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
1301 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1303 return 1024ULL << tlbm_size
;
1306 /* TLB check function for MAS based SoftTLBs */
1307 static int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1309 target_ulong address
, uint32_t pid
)
1314 /* Check valid flag */
1315 if (!(tlb
->mas1
& MAS1_VALID
)) {
1319 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
1320 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
1321 PRIx64
" mask=0x" TARGET_FMT_lx
" MAS7_3=0x%" PRIx64
" MAS8=%x\n",
1322 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
, tlb
->mas7_3
,
1326 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
1327 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
1331 /* Check effective address */
1332 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
1337 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
1343 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1344 hwaddr
*raddr
, int *prot
,
1345 target_ulong address
, int rw
,
1351 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1352 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1356 if (env
->spr
[SPR_BOOKE_PID1
] &&
1357 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1358 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1362 if (env
->spr
[SPR_BOOKE_PID2
] &&
1363 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1364 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1368 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1374 if (tlb
->mas7_3
& MAS3_UR
) {
1377 if (tlb
->mas7_3
& MAS3_UW
) {
1378 prot2
|= PAGE_WRITE
;
1380 if (tlb
->mas7_3
& MAS3_UX
) {
1384 if (tlb
->mas7_3
& MAS3_SR
) {
1387 if (tlb
->mas7_3
& MAS3_SW
) {
1388 prot2
|= PAGE_WRITE
;
1390 if (tlb
->mas7_3
& MAS3_SX
) {
1395 /* Check the address space and permissions */
1396 if (access_type
== ACCESS_CODE
) {
1397 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1398 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1403 if (prot2
& PAGE_EXEC
) {
1404 LOG_SWTLB("%s: good TLB!\n", __func__
);
1408 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1411 if (msr_dr
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1412 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1417 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1418 LOG_SWTLB("%s: found TLB!\n", __func__
);
1422 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1429 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1430 target_ulong address
, int rw
,
1438 raddr
= (hwaddr
)-1ULL;
1440 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1441 int ways
= booke206_tlb_ways(env
, i
);
1443 for (j
= 0; j
< ways
; j
++) {
1444 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1448 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1460 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1461 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1464 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1465 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1471 static const char *book3e_tsize_to_str
[32] = {
1472 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1473 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1474 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1478 static void mmubooke_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1481 ppcemb_tlb_t
*entry
;
1484 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1485 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1489 cpu_fprintf(f
, "\nTLB:\n");
1490 cpu_fprintf(f
, "Effective Physical Size PID Prot "
1493 entry
= &env
->tlb
.tlbe
[0];
1494 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1497 uint64_t size
= (uint64_t)entry
->size
;
1500 /* Check valid flag */
1501 if (!(entry
->prot
& PAGE_VALID
)) {
1505 mask
= ~(entry
->size
- 1);
1506 ea
= entry
->EPN
& mask
;
1507 pa
= entry
->RPN
& mask
;
1508 /* Extend the physical address to 36 bits */
1509 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1512 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ 1024);
1514 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
);
1516 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1517 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1518 entry
->prot
, entry
->attr
);
1523 static void mmubooke206_dump_one_tlb(FILE *f
, fprintf_function cpu_fprintf
,
1524 CPUPPCState
*env
, int tlbn
, int offset
,
1527 ppcmas_tlb_t
*entry
;
1530 cpu_fprintf(f
, "\nTLB%d:\n", tlbn
);
1531 cpu_fprintf(f
, "Effective Physical Size TID TS SRWX"
1532 " URWX WIMGE U0123\n");
1534 entry
= &env
->tlb
.tlbm
[offset
];
1535 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1536 hwaddr ea
, pa
, size
;
1539 if (!(entry
->mas1
& MAS1_VALID
)) {
1543 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1544 size
= 1024ULL << tsize
;
1545 ea
= entry
->mas2
& ~(size
- 1);
1546 pa
= entry
->mas7_3
& ~(size
- 1);
1548 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1549 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1550 (uint64_t)ea
, (uint64_t)pa
,
1551 book3e_tsize_to_str
[tsize
],
1552 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1553 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1554 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1555 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1556 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1557 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1558 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1559 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1560 entry
->mas2
& MAS2_W
? 'W' : '-',
1561 entry
->mas2
& MAS2_I
? 'I' : '-',
1562 entry
->mas2
& MAS2_M
? 'M' : '-',
1563 entry
->mas2
& MAS2_G
? 'G' : '-',
1564 entry
->mas2
& MAS2_E
? 'E' : '-',
1565 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1566 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1567 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1568 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1572 static void mmubooke206_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1578 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1579 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1583 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1584 int size
= booke206_tlb_size(env
, i
);
1590 mmubooke206_dump_one_tlb(f
, cpu_fprintf
, env
, i
, offset
, size
);
1595 #if defined(TARGET_PPC64)
1596 static void mmubooks_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1600 uint64_t slbe
, slbv
;
1602 cpu_synchronize_state(env
);
1604 cpu_fprintf(f
, "SLB\tESID\t\t\tVSID\n");
1605 for (i
= 0; i
< env
->slb_nr
; i
++) {
1606 slbe
= env
->slb
[i
].esid
;
1607 slbv
= env
->slb
[i
].vsid
;
1608 if (slbe
== 0 && slbv
== 0) {
1611 cpu_fprintf(f
, "%d\t0x%016" PRIx64
"\t0x%016" PRIx64
"\n",
1617 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
1619 switch (env
->mmu_model
) {
1620 case POWERPC_MMU_BOOKE
:
1621 mmubooke_dump_mmu(f
, cpu_fprintf
, env
);
1623 case POWERPC_MMU_BOOKE206
:
1624 mmubooke206_dump_mmu(f
, cpu_fprintf
, env
);
1626 #if defined(TARGET_PPC64)
1627 case POWERPC_MMU_64B
:
1628 case POWERPC_MMU_2_06
:
1629 case POWERPC_MMU_2_06d
:
1630 mmubooks_dump_mmu(f
, cpu_fprintf
, env
);
1634 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1638 static inline int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1639 target_ulong eaddr
, int rw
)
1644 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1646 switch (env
->mmu_model
) {
1647 case POWERPC_MMU_32B
:
1648 case POWERPC_MMU_601
:
1649 case POWERPC_MMU_SOFT_6xx
:
1650 case POWERPC_MMU_SOFT_74xx
:
1651 case POWERPC_MMU_SOFT_4xx
:
1652 case POWERPC_MMU_REAL
:
1653 case POWERPC_MMU_BOOKE
:
1654 ctx
->prot
|= PAGE_WRITE
;
1656 #if defined(TARGET_PPC64)
1657 case POWERPC_MMU_64B
:
1658 case POWERPC_MMU_2_06
:
1659 case POWERPC_MMU_2_06d
:
1660 /* Real address are 60 bits long */
1661 ctx
->raddr
&= 0x0FFFFFFFFFFFFFFFULL
;
1662 ctx
->prot
|= PAGE_WRITE
;
1665 case POWERPC_MMU_SOFT_4xx_Z
:
1666 if (unlikely(msr_pe
!= 0)) {
1667 /* 403 family add some particular protections,
1668 * using PBL/PBU registers for accesses with no translation.
1671 /* Check PLB validity */
1672 (env
->pb
[0] < env
->pb
[1] &&
1673 /* and address in plb area */
1674 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1675 (env
->pb
[2] < env
->pb
[3] &&
1676 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1677 if (in_plb
^ msr_px
) {
1678 /* Access in protected area */
1680 /* Access is not allowed */
1684 /* Read-write access is allowed */
1685 ctx
->prot
|= PAGE_WRITE
;
1689 case POWERPC_MMU_MPC8xx
:
1691 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1693 case POWERPC_MMU_BOOKE206
:
1694 cpu_abort(env
, "BookE 2.06 MMU doesn't have physical real mode\n");
1697 cpu_abort(env
, "Unknown or invalid MMU model\n");
1704 static int get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1705 target_ulong eaddr
, int rw
, int access_type
)
1710 qemu_log("%s\n", __func__
);
1712 if ((access_type
== ACCESS_CODE
&& msr_ir
== 0) ||
1713 (access_type
!= ACCESS_CODE
&& msr_dr
== 0)) {
1714 if (env
->mmu_model
== POWERPC_MMU_BOOKE
) {
1715 /* The BookE MMU always performs address translation. The
1716 IS and DS bits only affect the address space. */
1717 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1719 } else if (env
->mmu_model
== POWERPC_MMU_BOOKE206
) {
1720 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1723 /* No address translation. */
1724 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1728 switch (env
->mmu_model
) {
1729 case POWERPC_MMU_32B
:
1730 case POWERPC_MMU_601
:
1731 case POWERPC_MMU_SOFT_6xx
:
1732 case POWERPC_MMU_SOFT_74xx
:
1733 /* Try to find a BAT */
1734 if (env
->nb_BATs
!= 0) {
1735 ret
= get_bat(env
, ctx
, eaddr
, rw
, access_type
);
1737 #if defined(TARGET_PPC64)
1738 case POWERPC_MMU_64B
:
1739 case POWERPC_MMU_2_06
:
1740 case POWERPC_MMU_2_06d
:
1743 /* We didn't match any BAT entry or don't have BATs */
1744 ret
= get_segment(env
, ctx
, eaddr
, rw
, access_type
);
1747 case POWERPC_MMU_SOFT_4xx
:
1748 case POWERPC_MMU_SOFT_4xx_Z
:
1749 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1752 case POWERPC_MMU_BOOKE
:
1753 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1756 case POWERPC_MMU_BOOKE206
:
1757 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1760 case POWERPC_MMU_MPC8xx
:
1762 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1764 case POWERPC_MMU_REAL
:
1765 cpu_abort(env
, "PowerPC in real mode do not do any translation\n");
1768 cpu_abort(env
, "Unknown or invalid MMU model\n");
1773 qemu_log("%s address " TARGET_FMT_lx
" => %d " TARGET_FMT_plx
"\n",
1774 __func__
, eaddr
, ret
, ctx
->raddr
);
1780 hwaddr
cpu_get_phys_page_debug(CPUPPCState
*env
, target_ulong addr
)
1784 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) != 0)) {
1788 return ctx
.raddr
& TARGET_PAGE_MASK
;
1791 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1794 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1795 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1796 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1797 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1798 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1799 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1802 if (((rw
== 2) && msr_ir
) || ((rw
!= 2) && msr_dr
)) {
1803 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1804 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1807 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1808 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1810 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1811 case MAS4_TIDSELD_PID0
:
1812 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID
] << MAS1_TID_SHIFT
;
1814 case MAS4_TIDSELD_PID1
:
1815 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID1
] << MAS1_TID_SHIFT
;
1817 case MAS4_TIDSELD_PID2
:
1818 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID2
] << MAS1_TID_SHIFT
;
1822 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1824 /* next victim logic */
1825 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1827 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1828 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1831 /* Perform address translation */
1832 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
1842 access_type
= ACCESS_CODE
;
1845 access_type
= env
->access_type
;
1847 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1849 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1850 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1851 mmu_idx
, TARGET_PAGE_SIZE
);
1853 } else if (ret
< 0) {
1855 if (access_type
== ACCESS_CODE
) {
1858 /* No matches in page tables or TLB */
1859 switch (env
->mmu_model
) {
1860 case POWERPC_MMU_SOFT_6xx
:
1861 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1862 env
->error_code
= 1 << 18;
1863 env
->spr
[SPR_IMISS
] = address
;
1864 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1866 case POWERPC_MMU_SOFT_74xx
:
1867 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1869 case POWERPC_MMU_SOFT_4xx
:
1870 case POWERPC_MMU_SOFT_4xx_Z
:
1871 env
->exception_index
= POWERPC_EXCP_ITLB
;
1872 env
->error_code
= 0;
1873 env
->spr
[SPR_40x_DEAR
] = address
;
1874 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1876 case POWERPC_MMU_32B
:
1877 case POWERPC_MMU_601
:
1878 #if defined(TARGET_PPC64)
1879 case POWERPC_MMU_64B
:
1880 case POWERPC_MMU_2_06
:
1881 case POWERPC_MMU_2_06d
:
1883 env
->exception_index
= POWERPC_EXCP_ISI
;
1884 env
->error_code
= 0x40000000;
1886 case POWERPC_MMU_BOOKE206
:
1887 booke206_update_mas_tlb_miss(env
, address
, rw
);
1889 case POWERPC_MMU_BOOKE
:
1890 env
->exception_index
= POWERPC_EXCP_ITLB
;
1891 env
->error_code
= 0;
1892 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1894 case POWERPC_MMU_MPC8xx
:
1896 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1898 case POWERPC_MMU_REAL
:
1899 cpu_abort(env
, "PowerPC in real mode should never raise "
1900 "any MMU exceptions\n");
1903 cpu_abort(env
, "Unknown or invalid MMU model\n");
1908 /* Access rights violation */
1909 env
->exception_index
= POWERPC_EXCP_ISI
;
1910 env
->error_code
= 0x08000000;
1913 /* No execute protection violation */
1914 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1915 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1916 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1918 env
->exception_index
= POWERPC_EXCP_ISI
;
1919 env
->error_code
= 0x10000000;
1922 /* Direct store exception */
1923 /* No code fetch is allowed in direct-store areas */
1924 env
->exception_index
= POWERPC_EXCP_ISI
;
1925 env
->error_code
= 0x10000000;
1927 #if defined(TARGET_PPC64)
1929 /* No match in segment table */
1930 env
->exception_index
= POWERPC_EXCP_ISEG
;
1931 env
->error_code
= 0;
1938 /* No matches in page tables or TLB */
1939 switch (env
->mmu_model
) {
1940 case POWERPC_MMU_SOFT_6xx
:
1942 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1943 env
->error_code
= 1 << 16;
1945 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1946 env
->error_code
= 0;
1948 env
->spr
[SPR_DMISS
] = address
;
1949 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1951 env
->error_code
|= ctx
.key
<< 19;
1952 env
->spr
[SPR_HASH1
] = env
->htab_base
+
1953 get_pteg_offset(env
, ctx
.hash
[0], HASH_PTE_SIZE_32
);
1954 env
->spr
[SPR_HASH2
] = env
->htab_base
+
1955 get_pteg_offset(env
, ctx
.hash
[1], HASH_PTE_SIZE_32
);
1957 case POWERPC_MMU_SOFT_74xx
:
1959 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1961 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1964 /* Implement LRU algorithm */
1965 env
->error_code
= ctx
.key
<< 19;
1966 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1967 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1968 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1970 case POWERPC_MMU_SOFT_4xx
:
1971 case POWERPC_MMU_SOFT_4xx_Z
:
1972 env
->exception_index
= POWERPC_EXCP_DTLB
;
1973 env
->error_code
= 0;
1974 env
->spr
[SPR_40x_DEAR
] = address
;
1976 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1978 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1981 case POWERPC_MMU_32B
:
1982 case POWERPC_MMU_601
:
1983 #if defined(TARGET_PPC64)
1984 case POWERPC_MMU_64B
:
1985 case POWERPC_MMU_2_06
:
1986 case POWERPC_MMU_2_06d
:
1988 env
->exception_index
= POWERPC_EXCP_DSI
;
1989 env
->error_code
= 0;
1990 env
->spr
[SPR_DAR
] = address
;
1992 env
->spr
[SPR_DSISR
] = 0x42000000;
1994 env
->spr
[SPR_DSISR
] = 0x40000000;
1997 case POWERPC_MMU_MPC8xx
:
1999 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2001 case POWERPC_MMU_BOOKE206
:
2002 booke206_update_mas_tlb_miss(env
, address
, rw
);
2004 case POWERPC_MMU_BOOKE
:
2005 env
->exception_index
= POWERPC_EXCP_DTLB
;
2006 env
->error_code
= 0;
2007 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2008 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2010 case POWERPC_MMU_REAL
:
2011 cpu_abort(env
, "PowerPC in real mode should never raise "
2012 "any MMU exceptions\n");
2015 cpu_abort(env
, "Unknown or invalid MMU model\n");
2020 /* Access rights violation */
2021 env
->exception_index
= POWERPC_EXCP_DSI
;
2022 env
->error_code
= 0;
2023 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
2024 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
2025 env
->spr
[SPR_40x_DEAR
] = address
;
2027 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
2029 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
2030 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
2031 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2032 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2034 env
->spr
[SPR_DAR
] = address
;
2036 env
->spr
[SPR_DSISR
] = 0x0A000000;
2038 env
->spr
[SPR_DSISR
] = 0x08000000;
2043 /* Direct store exception */
2044 switch (access_type
) {
2046 /* Floating point load/store */
2047 env
->exception_index
= POWERPC_EXCP_ALIGN
;
2048 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
2049 env
->spr
[SPR_DAR
] = address
;
2052 /* lwarx, ldarx or stwcx. */
2053 env
->exception_index
= POWERPC_EXCP_DSI
;
2054 env
->error_code
= 0;
2055 env
->spr
[SPR_DAR
] = address
;
2057 env
->spr
[SPR_DSISR
] = 0x06000000;
2059 env
->spr
[SPR_DSISR
] = 0x04000000;
2063 /* eciwx or ecowx */
2064 env
->exception_index
= POWERPC_EXCP_DSI
;
2065 env
->error_code
= 0;
2066 env
->spr
[SPR_DAR
] = address
;
2068 env
->spr
[SPR_DSISR
] = 0x06100000;
2070 env
->spr
[SPR_DSISR
] = 0x04100000;
2074 printf("DSI: invalid exception (%d)\n", ret
);
2075 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
2077 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
2078 env
->spr
[SPR_DAR
] = address
;
2082 #if defined(TARGET_PPC64)
2084 /* No match in segment table */
2085 env
->exception_index
= POWERPC_EXCP_DSEG
;
2086 env
->error_code
= 0;
2087 env
->spr
[SPR_DAR
] = address
;
2093 printf("%s: set exception to %d %02x\n", __func__
,
2094 env
->exception
, env
->error_code
);
2102 /*****************************************************************************/
2103 /* BATs management */
2104 #if !defined(FLUSH_ALL_TLBS)
2105 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
2108 target_ulong base
, end
, page
;
2110 base
= BATu
& ~0x0001FFFF;
2111 end
= base
+ mask
+ 0x00020000;
2112 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
2113 TARGET_FMT_lx
")\n", base
, end
, mask
);
2114 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2115 tlb_flush_page(env
, page
);
2117 LOG_BATS("Flush done\n");
2121 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
2124 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
2125 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
2128 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2132 dump_store_bat(env
, 'I', 0, nr
, value
);
2133 if (env
->IBAT
[0][nr
] != value
) {
2134 mask
= (value
<< 15) & 0x0FFE0000UL
;
2135 #if !defined(FLUSH_ALL_TLBS)
2136 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2138 /* When storing valid upper BAT, mask BEPI and BRPN
2139 * and invalidate all TLBs covered by this BAT
2141 mask
= (value
<< 15) & 0x0FFE0000UL
;
2142 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2143 (value
& ~0x0001FFFFUL
& ~mask
);
2144 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
2145 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2146 #if !defined(FLUSH_ALL_TLBS)
2147 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2154 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2156 dump_store_bat(env
, 'I', 1, nr
, value
);
2157 env
->IBAT
[1][nr
] = value
;
2160 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2164 dump_store_bat(env
, 'D', 0, nr
, value
);
2165 if (env
->DBAT
[0][nr
] != value
) {
2166 /* When storing valid upper BAT, mask BEPI and BRPN
2167 * and invalidate all TLBs covered by this BAT
2169 mask
= (value
<< 15) & 0x0FFE0000UL
;
2170 #if !defined(FLUSH_ALL_TLBS)
2171 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2173 mask
= (value
<< 15) & 0x0FFE0000UL
;
2174 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2175 (value
& ~0x0001FFFFUL
& ~mask
);
2176 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
2177 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2178 #if !defined(FLUSH_ALL_TLBS)
2179 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2186 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2188 dump_store_bat(env
, 'D', 1, nr
, value
);
2189 env
->DBAT
[1][nr
] = value
;
2192 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2195 #if defined(FLUSH_ALL_TLBS)
2199 dump_store_bat(env
, 'I', 0, nr
, value
);
2200 if (env
->IBAT
[0][nr
] != value
) {
2201 #if defined(FLUSH_ALL_TLBS)
2204 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2205 if (env
->IBAT
[1][nr
] & 0x40) {
2206 /* Invalidate BAT only if it is valid */
2207 #if !defined(FLUSH_ALL_TLBS)
2208 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2213 /* When storing valid upper BAT, mask BEPI and BRPN
2214 * and invalidate all TLBs covered by this BAT
2216 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2217 (value
& ~0x0001FFFFUL
& ~mask
);
2218 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
2219 if (env
->IBAT
[1][nr
] & 0x40) {
2220 #if !defined(FLUSH_ALL_TLBS)
2221 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2226 #if defined(FLUSH_ALL_TLBS)
2234 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2236 #if !defined(FLUSH_ALL_TLBS)
2242 dump_store_bat(env
, 'I', 1, nr
, value
);
2243 if (env
->IBAT
[1][nr
] != value
) {
2244 #if defined(FLUSH_ALL_TLBS)
2247 if (env
->IBAT
[1][nr
] & 0x40) {
2248 #if !defined(FLUSH_ALL_TLBS)
2249 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2250 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2256 #if !defined(FLUSH_ALL_TLBS)
2257 mask
= (value
<< 17) & 0x0FFE0000UL
;
2258 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2263 env
->IBAT
[1][nr
] = value
;
2264 env
->DBAT
[1][nr
] = value
;
2265 #if defined(FLUSH_ALL_TLBS)
2273 /*****************************************************************************/
2274 /* TLB management */
2275 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
2277 switch (env
->mmu_model
) {
2278 case POWERPC_MMU_SOFT_6xx
:
2279 case POWERPC_MMU_SOFT_74xx
:
2280 ppc6xx_tlb_invalidate_all(env
);
2282 case POWERPC_MMU_SOFT_4xx
:
2283 case POWERPC_MMU_SOFT_4xx_Z
:
2284 ppc4xx_tlb_invalidate_all(env
);
2286 case POWERPC_MMU_REAL
:
2287 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2289 case POWERPC_MMU_MPC8xx
:
2291 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2293 case POWERPC_MMU_BOOKE
:
2296 case POWERPC_MMU_BOOKE206
:
2297 booke206_flush_tlb(env
, -1, 0);
2299 case POWERPC_MMU_32B
:
2300 case POWERPC_MMU_601
:
2301 #if defined(TARGET_PPC64)
2302 case POWERPC_MMU_64B
:
2303 case POWERPC_MMU_2_06
:
2304 case POWERPC_MMU_2_06d
:
2305 #endif /* defined(TARGET_PPC64) */
2310 cpu_abort(env
, "Unknown MMU model\n");
2315 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
2317 #if !defined(FLUSH_ALL_TLBS)
2318 addr
&= TARGET_PAGE_MASK
;
2319 switch (env
->mmu_model
) {
2320 case POWERPC_MMU_SOFT_6xx
:
2321 case POWERPC_MMU_SOFT_74xx
:
2322 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2323 if (env
->id_tlbs
== 1) {
2324 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2327 case POWERPC_MMU_SOFT_4xx
:
2328 case POWERPC_MMU_SOFT_4xx_Z
:
2329 ppc4xx_tlb_invalidate_virt(env
, addr
, env
->spr
[SPR_40x_PID
]);
2331 case POWERPC_MMU_REAL
:
2332 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2334 case POWERPC_MMU_MPC8xx
:
2336 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2338 case POWERPC_MMU_BOOKE
:
2340 cpu_abort(env
, "BookE MMU model is not implemented\n");
2342 case POWERPC_MMU_BOOKE206
:
2344 cpu_abort(env
, "BookE 2.06 MMU model is not implemented\n");
2346 case POWERPC_MMU_32B
:
2347 case POWERPC_MMU_601
:
2348 /* tlbie invalidate TLBs for all segments */
2349 addr
&= ~((target_ulong
)-1ULL << 28);
2350 /* XXX: this case should be optimized,
2351 * giving a mask to tlb_flush_page
2353 tlb_flush_page(env
, addr
| (0x0 << 28));
2354 tlb_flush_page(env
, addr
| (0x1 << 28));
2355 tlb_flush_page(env
, addr
| (0x2 << 28));
2356 tlb_flush_page(env
, addr
| (0x3 << 28));
2357 tlb_flush_page(env
, addr
| (0x4 << 28));
2358 tlb_flush_page(env
, addr
| (0x5 << 28));
2359 tlb_flush_page(env
, addr
| (0x6 << 28));
2360 tlb_flush_page(env
, addr
| (0x7 << 28));
2361 tlb_flush_page(env
, addr
| (0x8 << 28));
2362 tlb_flush_page(env
, addr
| (0x9 << 28));
2363 tlb_flush_page(env
, addr
| (0xA << 28));
2364 tlb_flush_page(env
, addr
| (0xB << 28));
2365 tlb_flush_page(env
, addr
| (0xC << 28));
2366 tlb_flush_page(env
, addr
| (0xD << 28));
2367 tlb_flush_page(env
, addr
| (0xE << 28));
2368 tlb_flush_page(env
, addr
| (0xF << 28));
2370 #if defined(TARGET_PPC64)
2371 case POWERPC_MMU_64B
:
2372 case POWERPC_MMU_2_06
:
2373 case POWERPC_MMU_2_06d
:
2374 /* tlbie invalidate TLBs for all segments */
2375 /* XXX: given the fact that there are too many segments to invalidate,
2376 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2377 * we just invalidate all TLBs
2381 #endif /* defined(TARGET_PPC64) */
2384 cpu_abort(env
, "Unknown MMU model\n");
2388 ppc_tlb_invalidate_all(env
);
2392 /*****************************************************************************/
2393 /* Special registers manipulation */
2394 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
2396 LOG_MMU("%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2397 if (env
->spr
[SPR_SDR1
] != value
) {
2398 env
->spr
[SPR_SDR1
] = value
;
2399 #if defined(TARGET_PPC64)
2400 if (env
->mmu_model
& POWERPC_MMU_64
) {
2401 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
2403 if (htabsize
> 28) {
2404 fprintf(stderr
, "Invalid HTABSIZE 0x" TARGET_FMT_lx
2405 " stored in SDR1\n", htabsize
);
2408 env
->htab_mask
= (1ULL << (htabsize
+ 18)) - 1;
2409 env
->htab_base
= value
& SDR_64_HTABORG
;
2411 #endif /* defined(TARGET_PPC64) */
2413 /* FIXME: Should check for valid HTABMASK values */
2414 env
->htab_mask
= ((value
& SDR_32_HTABMASK
) << 16) | 0xFFFF;
2415 env
->htab_base
= value
& SDR_32_HTABORG
;
2421 /* Segment registers load and store */
2422 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2424 #if defined(TARGET_PPC64)
2425 if (env
->mmu_model
& POWERPC_MMU_64
) {
2430 return env
->sr
[sr_num
];
2433 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2435 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2436 (int)srnum
, value
, env
->sr
[srnum
]);
2437 #if defined(TARGET_PPC64)
2438 if (env
->mmu_model
& POWERPC_MMU_64
) {
2439 uint64_t rb
= 0, rs
= 0;
2442 rb
|= ((uint32_t)srnum
& 0xf) << 28;
2443 /* Set the valid bit */
2446 rb
|= (uint32_t)srnum
;
2449 rs
|= (value
& 0xfffffff) << 12;
2451 rs
|= ((value
>> 27) & 0xf) << 8;
2453 ppc_store_slb(env
, rb
, rs
);
2456 if (env
->sr
[srnum
] != value
) {
2457 env
->sr
[srnum
] = value
;
2458 /* Invalidating 256MB of virtual memory in 4kB pages is way longer than
2459 flusing the whole TLB. */
2460 #if !defined(FLUSH_ALL_TLBS) && 0
2462 target_ulong page
, end
;
2463 /* Invalidate 256 MB of virtual memory */
2464 page
= (16 << 20) * srnum
;
2465 end
= page
+ (16 << 20);
2466 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2467 tlb_flush_page(env
, page
);
2475 #endif /* !defined(CONFIG_USER_ONLY) */
2477 #if !defined(CONFIG_USER_ONLY)
2478 /* SLB management */
2479 #if defined(TARGET_PPC64)
2480 void helper_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
2482 if (ppc_store_slb(env
, rb
, rs
) < 0) {
2483 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2484 POWERPC_EXCP_INVAL
);
2488 target_ulong
helper_load_slb_esid(CPUPPCState
*env
, target_ulong rb
)
2490 target_ulong rt
= 0;
2492 if (ppc_load_slb_esid(env
, rb
, &rt
) < 0) {
2493 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2494 POWERPC_EXCP_INVAL
);
2499 target_ulong
helper_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
)
2501 target_ulong rt
= 0;
2503 if (ppc_load_slb_vsid(env
, rb
, &rt
) < 0) {
2504 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2505 POWERPC_EXCP_INVAL
);
2509 #endif /* defined(TARGET_PPC64) */
2511 /* TLB management */
2512 void helper_tlbia(CPUPPCState
*env
)
2514 ppc_tlb_invalidate_all(env
);
2517 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2519 ppc_tlb_invalidate_one(env
, addr
);
2522 /* Software driven TLBs management */
2523 /* PowerPC 602/603 software TLB load instructions helpers */
2524 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2526 target_ulong RPN
, CMP
, EPN
;
2529 RPN
= env
->spr
[SPR_RPA
];
2531 CMP
= env
->spr
[SPR_ICMP
];
2532 EPN
= env
->spr
[SPR_IMISS
];
2534 CMP
= env
->spr
[SPR_DCMP
];
2535 EPN
= env
->spr
[SPR_DMISS
];
2537 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2538 (void)EPN
; /* avoid a compiler warning */
2539 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2540 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2542 /* Store this TLB */
2543 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2544 way
, is_code
, CMP
, RPN
);
2547 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2549 do_6xx_tlb(env
, EPN
, 0);
2552 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2554 do_6xx_tlb(env
, EPN
, 1);
2557 /* PowerPC 74xx software TLB load instructions helpers */
2558 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2560 target_ulong RPN
, CMP
, EPN
;
2563 RPN
= env
->spr
[SPR_PTELO
];
2564 CMP
= env
->spr
[SPR_PTEHI
];
2565 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2566 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2567 (void)EPN
; /* avoid a compiler warning */
2568 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2569 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2571 /* Store this TLB */
2572 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2573 way
, is_code
, CMP
, RPN
);
2576 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2578 do_74xx_tlb(env
, EPN
, 0);
2581 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2583 do_74xx_tlb(env
, EPN
, 1);
2586 /*****************************************************************************/
2587 /* PowerPC 601 specific instructions (POWER bridge) */
2589 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2593 target_ulong ret
= 0;
2595 /* We don't have to generate many instances of this instruction,
2596 * as rac is supervisor only.
2598 /* XXX: FIX THIS: Pretend we have no BAT */
2599 nb_BATs
= env
->nb_BATs
;
2601 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2604 env
->nb_BATs
= nb_BATs
;
2608 static inline target_ulong
booke_tlb_to_page_size(int size
)
2610 return 1024 << (2 * size
);
2613 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2617 switch (page_size
) {
2651 #if defined(TARGET_PPC64)
2652 case 0x000100000000ULL
:
2655 case 0x000400000000ULL
:
2658 case 0x001000000000ULL
:
2661 case 0x004000000000ULL
:
2664 case 0x010000000000ULL
:
2676 /* Helpers for 4xx TLB management */
2677 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2679 #define PPC4XX_TLBHI_V 0x00000040
2680 #define PPC4XX_TLBHI_E 0x00000020
2681 #define PPC4XX_TLBHI_SIZE_MIN 0
2682 #define PPC4XX_TLBHI_SIZE_MAX 7
2683 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2684 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2685 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2687 #define PPC4XX_TLBLO_EX 0x00000200
2688 #define PPC4XX_TLBLO_WR 0x00000100
2689 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2690 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
2692 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2698 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2699 tlb
= &env
->tlb
.tlbe
[entry
];
2701 if (tlb
->prot
& PAGE_VALID
) {
2702 ret
|= PPC4XX_TLBHI_V
;
2704 size
= booke_page_size_to_tlb(tlb
->size
);
2705 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2706 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2708 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2709 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2713 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2718 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2719 tlb
= &env
->tlb
.tlbe
[entry
];
2721 if (tlb
->prot
& PAGE_EXEC
) {
2722 ret
|= PPC4XX_TLBLO_EX
;
2724 if (tlb
->prot
& PAGE_WRITE
) {
2725 ret
|= PPC4XX_TLBLO_WR
;
2730 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2734 target_ulong page
, end
;
2736 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2738 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2739 tlb
= &env
->tlb
.tlbe
[entry
];
2740 /* Invalidate previous TLB (if it's valid) */
2741 if (tlb
->prot
& PAGE_VALID
) {
2742 end
= tlb
->EPN
+ tlb
->size
;
2743 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2744 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2745 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2746 tlb_flush_page(env
, page
);
2749 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2750 & PPC4XX_TLBHI_SIZE_MASK
);
2751 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2752 * If this ever occurs, one should use the ppcemb target instead
2753 * of the ppc or ppc64 one
2755 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2756 cpu_abort(env
, "TLB size " TARGET_FMT_lu
" < %u "
2757 "are not supported (%d)\n",
2758 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2760 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2761 if (val
& PPC4XX_TLBHI_V
) {
2762 tlb
->prot
|= PAGE_VALID
;
2763 if (val
& PPC4XX_TLBHI_E
) {
2764 /* XXX: TO BE FIXED */
2766 "Little-endian TLB entries are not supported by now\n");
2769 tlb
->prot
&= ~PAGE_VALID
;
2771 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2772 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2773 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2774 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2775 tlb
->prot
& PAGE_READ
? 'r' : '-',
2776 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2777 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2778 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2779 /* Invalidate new TLB (if valid) */
2780 if (tlb
->prot
& PAGE_VALID
) {
2781 end
= tlb
->EPN
+ tlb
->size
;
2782 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2783 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2784 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2785 tlb_flush_page(env
, page
);
2790 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2795 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2797 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2798 tlb
= &env
->tlb
.tlbe
[entry
];
2799 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2800 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2801 tlb
->prot
= PAGE_READ
;
2802 if (val
& PPC4XX_TLBLO_EX
) {
2803 tlb
->prot
|= PAGE_EXEC
;
2805 if (val
& PPC4XX_TLBLO_WR
) {
2806 tlb
->prot
|= PAGE_WRITE
;
2808 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2809 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2810 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2811 tlb
->prot
& PAGE_READ
? 'r' : '-',
2812 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2813 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2814 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2817 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2819 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
2822 /* PowerPC 440 TLB management */
2823 void helper_440_tlbwe(CPUPPCState
*env
, uint32_t word
, target_ulong entry
,
2827 target_ulong EPN
, RPN
, size
;
2830 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx
"\n",
2831 __func__
, word
, (int)entry
, value
);
2834 tlb
= &env
->tlb
.tlbe
[entry
];
2837 /* Just here to please gcc */
2839 EPN
= value
& 0xFFFFFC00;
2840 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
) {
2844 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
2845 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
) {
2850 tlb
->attr
|= (value
>> 8) & 1;
2851 if (value
& 0x200) {
2852 tlb
->prot
|= PAGE_VALID
;
2854 if (tlb
->prot
& PAGE_VALID
) {
2855 tlb
->prot
&= ~PAGE_VALID
;
2859 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
2860 if (do_flush_tlbs
) {
2865 RPN
= value
& 0xFFFFFC0F;
2866 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
) {
2872 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
2873 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
2875 tlb
->prot
|= PAGE_READ
<< 4;
2878 tlb
->prot
|= PAGE_WRITE
<< 4;
2881 tlb
->prot
|= PAGE_EXEC
<< 4;
2884 tlb
->prot
|= PAGE_READ
;
2887 tlb
->prot
|= PAGE_WRITE
;
2890 tlb
->prot
|= PAGE_EXEC
;
2896 target_ulong
helper_440_tlbre(CPUPPCState
*env
, uint32_t word
,
2904 tlb
= &env
->tlb
.tlbe
[entry
];
2907 /* Just here to please gcc */
2910 size
= booke_page_size_to_tlb(tlb
->size
);
2911 if (size
< 0 || size
> 0xF) {
2915 if (tlb
->attr
& 0x1) {
2918 if (tlb
->prot
& PAGE_VALID
) {
2921 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
2922 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
2928 ret
= tlb
->attr
& ~0x1;
2929 if (tlb
->prot
& (PAGE_READ
<< 4)) {
2932 if (tlb
->prot
& (PAGE_WRITE
<< 4)) {
2935 if (tlb
->prot
& (PAGE_EXEC
<< 4)) {
2938 if (tlb
->prot
& PAGE_READ
) {
2941 if (tlb
->prot
& PAGE_WRITE
) {
2944 if (tlb
->prot
& PAGE_EXEC
) {
2952 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2954 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
2957 /* PowerPC BookE 2.06 TLB management */
2959 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2961 uint32_t tlbncfg
= 0;
2962 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2963 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
2966 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
2967 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
2969 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
2970 cpu_abort(env
, "we don't support HES yet\n");
2973 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
2976 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
2978 env
->spr
[pidn
] = pid
;
2979 /* changing PIDs mean we're in a different address space now */
2983 void helper_booke206_tlbwe(CPUPPCState
*env
)
2985 uint32_t tlbncfg
, tlbn
;
2987 uint32_t size_tlb
, size_ps
;
2991 switch (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_WQ_MASK
) {
2992 case MAS0_WQ_ALWAYS
:
2993 /* good to go, write that entry */
2996 /* XXX check if reserved */
3001 case MAS0_WQ_CLR_RSRV
:
3002 /* XXX clear entry */
3005 /* no idea what to do */
3009 if (((env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ATSEL
) == MAS0_ATSEL_LRAT
) &&
3011 /* XXX we don't support direct LRAT setting yet */
3012 fprintf(stderr
, "cpu: don't support LRAT setting yet\n");
3016 tlbn
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
3017 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
3019 tlb
= booke206_cur_tlb(env
);
3022 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
3023 POWERPC_EXCP_INVAL
|
3024 POWERPC_EXCP_INVAL_INVAL
);
3027 /* check that we support the targeted size */
3028 size_tlb
= (env
->spr
[SPR_BOOKE_MAS1
] & MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
3029 size_ps
= booke206_tlbnps(env
, tlbn
);
3030 if ((env
->spr
[SPR_BOOKE_MAS1
] & MAS1_VALID
) && (tlbncfg
& TLBnCFG_AVAIL
) &&
3031 !(size_ps
& (1 << size_tlb
))) {
3032 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
3033 POWERPC_EXCP_INVAL
|
3034 POWERPC_EXCP_INVAL_INVAL
);
3038 cpu_abort(env
, "missing HV implementation\n");
3040 tlb
->mas7_3
= ((uint64_t)env
->spr
[SPR_BOOKE_MAS7
] << 32) |
3041 env
->spr
[SPR_BOOKE_MAS3
];
3042 tlb
->mas1
= env
->spr
[SPR_BOOKE_MAS1
];
3045 if (!(tlbncfg
& TLBnCFG_AVAIL
)) {
3046 /* force !AVAIL TLB entries to correct page size */
3047 tlb
->mas1
&= ~MAS1_TSIZE_MASK
;
3048 /* XXX can be configured in MMUCSR0 */
3049 tlb
->mas1
|= (tlbncfg
& TLBnCFG_MINSIZE
) >> 12;
3052 /* Make a mask from TLB size to discard invalid bits in EPN field */
3053 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
3054 /* Add a mask for page attributes */
3055 mask
|= MAS2_ACM
| MAS2_VLE
| MAS2_W
| MAS2_I
| MAS2_M
| MAS2_G
| MAS2_E
;
3058 /* Executing a tlbwe instruction in 32-bit mode will set
3059 * bits 0:31 of the TLB EPN field to zero.
3064 tlb
->mas2
= env
->spr
[SPR_BOOKE_MAS2
] & mask
;
3066 if (!(tlbncfg
& TLBnCFG_IPROT
)) {
3067 /* no IPROT supported by TLB */
3068 tlb
->mas1
&= ~MAS1_IPROT
;
3071 if (booke206_tlb_to_page_size(env
, tlb
) == TARGET_PAGE_SIZE
) {
3072 tlb_flush_page(env
, tlb
->mas2
& MAS2_EPN_MASK
);
3078 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
3080 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
3081 int way
= booke206_tlbm_to_way(env
, tlb
);
3083 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
3084 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
3085 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3087 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
3088 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
3089 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
3090 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
3093 void helper_booke206_tlbre(CPUPPCState
*env
)
3095 ppcmas_tlb_t
*tlb
= NULL
;
3097 tlb
= booke206_cur_tlb(env
);
3099 env
->spr
[SPR_BOOKE_MAS1
] = 0;
3101 booke206_tlb_to_mas(env
, tlb
);
3105 void helper_booke206_tlbsx(CPUPPCState
*env
, target_ulong address
)
3107 ppcmas_tlb_t
*tlb
= NULL
;
3112 spid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID_MASK
) >> MAS6_SPID_SHIFT
;
3113 sas
= env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SAS
;
3115 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3116 int ways
= booke206_tlb_ways(env
, i
);
3118 for (j
= 0; j
< ways
; j
++) {
3119 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
3125 if (ppcmas_tlb_check(env
, tlb
, &raddr
, address
, spid
)) {
3129 if (sas
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
3133 booke206_tlb_to_mas(env
, tlb
);
3138 /* no entry found, fill with defaults */
3139 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
3140 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
3141 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
3142 env
->spr
[SPR_BOOKE_MAS3
] = 0;
3143 env
->spr
[SPR_BOOKE_MAS7
] = 0;
3145 if (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SAS
) {
3146 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
3149 env
->spr
[SPR_BOOKE_MAS1
] |= (env
->spr
[SPR_BOOKE_MAS6
] >> 16)
3152 /* next victim logic */
3153 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
3155 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
3156 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3159 static inline void booke206_invalidate_ea_tlb(CPUPPCState
*env
, int tlbn
,
3163 int ways
= booke206_tlb_ways(env
, tlbn
);
3166 for (i
= 0; i
< ways
; i
++) {
3167 ppcmas_tlb_t
*tlb
= booke206_get_tlbm(env
, tlbn
, ea
, i
);
3171 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
3172 if (((tlb
->mas2
& MAS2_EPN_MASK
) == (ea
& mask
)) &&
3173 !(tlb
->mas1
& MAS1_IPROT
)) {
3174 tlb
->mas1
&= ~MAS1_VALID
;
3179 void helper_booke206_tlbivax(CPUPPCState
*env
, target_ulong address
)
3181 if (address
& 0x4) {
3182 /* flush all entries */
3183 if (address
& 0x8) {
3184 /* flush all of TLB1 */
3185 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB1
, 1);
3187 /* flush all of TLB0 */
3188 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB0
, 0);
3193 if (address
& 0x8) {
3194 /* flush TLB1 entries */
3195 booke206_invalidate_ea_tlb(env
, 1, address
);
3198 /* flush TLB0 entries */
3199 booke206_invalidate_ea_tlb(env
, 0, address
);
3200 tlb_flush_page(env
, address
& MAS2_EPN_MASK
);
3204 void helper_booke206_tlbilx0(CPUPPCState
*env
, target_ulong address
)
3206 /* XXX missing LPID handling */
3207 booke206_flush_tlb(env
, -1, 1);
3210 void helper_booke206_tlbilx1(CPUPPCState
*env
, target_ulong address
)
3213 int tid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID
);
3214 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
3217 /* XXX missing LPID handling */
3218 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3219 tlb_size
= booke206_tlb_size(env
, i
);
3220 for (j
= 0; j
< tlb_size
; j
++) {
3221 if (!(tlb
[j
].mas1
& MAS1_IPROT
) &&
3222 ((tlb
[j
].mas1
& MAS1_TID_MASK
) == tid
)) {
3223 tlb
[j
].mas1
&= ~MAS1_VALID
;
3226 tlb
+= booke206_tlb_size(env
, i
);
3231 void helper_booke206_tlbilx3(CPUPPCState
*env
, target_ulong address
)
3235 int tid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID
);
3236 int pid
= tid
>> MAS6_SPID_SHIFT
;
3237 int sgs
= env
->spr
[SPR_BOOKE_MAS5
] & MAS5_SGS
;
3238 int ind
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SIND
) ? MAS1_IND
: 0;
3239 /* XXX check for unsupported isize and raise an invalid opcode then */
3240 int size
= env
->spr
[SPR_BOOKE_MAS6
] & MAS6_ISIZE_MASK
;
3241 /* XXX implement MAV2 handling */
3244 /* XXX missing LPID handling */
3245 /* flush by pid and ea */
3246 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3247 int ways
= booke206_tlb_ways(env
, i
);
3249 for (j
= 0; j
< ways
; j
++) {
3250 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
3254 if ((ppcmas_tlb_check(env
, tlb
, NULL
, address
, pid
) != 0) ||
3255 (tlb
->mas1
& MAS1_IPROT
) ||
3256 ((tlb
->mas1
& MAS1_IND
) != ind
) ||
3257 ((tlb
->mas8
& MAS8_TGS
) != sgs
)) {
3260 if (mav2
&& ((tlb
->mas1
& MAS1_TSIZE_MASK
) != size
)) {
3261 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
3264 /* XXX e500mc doesn't match SAS, but other cores might */
3265 tlb
->mas1
&= ~MAS1_VALID
;
3271 void helper_booke206_tlbflush(CPUPPCState
*env
, uint32_t type
)
3276 flags
|= BOOKE206_FLUSH_TLB1
;
3280 flags
|= BOOKE206_FLUSH_TLB0
;
3283 booke206_flush_tlb(env
, flags
, 1);