/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "sysemu/kvm.h"
27 //#define DEBUG_SOFTWARE_TLB
28 //#define DUMP_PAGE_TABLES
29 //#define DEBUG_SOFTWARE_TLB
30 //#define FLUSH_ALL_TLBS
33 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
34 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
36 # define LOG_MMU(...) do { } while (0)
37 # define LOG_MMU_STATE(...) do { } while (0)
40 #ifdef DEBUG_SOFTWARE_TLB
41 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
43 # define LOG_SWTLB(...) do { } while (0)
47 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
49 # define LOG_BATS(...) do { } while (0)
53 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
55 # define LOG_SLB(...) do { } while (0)
58 /*****************************************************************************/
59 /* PowerPC MMU emulation */
60 #if defined(CONFIG_USER_ONLY)
61 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
64 int exception
, error_code
;
67 exception
= POWERPC_EXCP_ISI
;
68 error_code
= 0x40000000;
70 exception
= POWERPC_EXCP_DSI
;
71 error_code
= 0x40000000;
73 error_code
|= 0x02000000;
75 env
->spr
[SPR_DAR
] = address
;
76 env
->spr
[SPR_DSISR
] = error_code
;
78 env
->exception_index
= exception
;
79 env
->error_code
= error_code
;
85 /* Common routines used by software and hardware TLBs emulation */
86 static inline int pte_is_valid(target_ulong pte0
)
88 return pte0
& 0x80000000 ? 1 : 0;
91 static inline void pte_invalidate(target_ulong
*pte0
)
96 #if defined(TARGET_PPC64)
97 static inline int pte64_is_valid(target_ulong pte0
)
99 return pte0
& 0x0000000000000001ULL
? 1 : 0;
102 static inline void pte64_invalidate(target_ulong
*pte0
)
104 *pte0
&= ~0x0000000000000001ULL
;
108 #define PTE_PTEM_MASK 0x7FFFFFBF
109 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
110 #if defined(TARGET_PPC64)
111 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
112 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
115 static inline int pp_check(int key
, int pp
, int nx
)
119 /* Compute access rights */
120 /* When pp is 3/7, the result is undefined. Set it to noaccess */
127 access
|= PAGE_WRITE
;
145 access
= PAGE_READ
| PAGE_WRITE
;
156 static inline int check_prot(int prot
, int rw
, int access_type
)
160 if (access_type
== ACCESS_CODE
) {
161 if (prot
& PAGE_EXEC
) {
167 if (prot
& PAGE_WRITE
) {
173 if (prot
& PAGE_READ
) {
183 static inline int pte_check(mmu_ctx_t
*ctx
, int is_64b
, target_ulong pte0
,
184 target_ulong pte1
, int h
, int rw
, int type
)
186 target_ulong ptem
, mmask
;
187 int access
, ret
, pteh
, ptev
, pp
;
190 /* Check validity and table match */
191 #if defined(TARGET_PPC64)
193 ptev
= pte64_is_valid(pte0
);
194 pteh
= (pte0
>> 1) & 1;
198 ptev
= pte_is_valid(pte0
);
199 pteh
= (pte0
>> 6) & 1;
201 if (ptev
&& h
== pteh
) {
202 /* Check vsid & api */
203 #if defined(TARGET_PPC64)
205 ptem
= pte0
& PTE64_PTEM_MASK
;
206 mmask
= PTE64_CHECK_MASK
;
207 pp
= (pte1
& 0x00000003) | ((pte1
>> 61) & 0x00000004);
208 ctx
->nx
= (pte1
>> 2) & 1; /* No execute bit */
209 ctx
->nx
|= (pte1
>> 3) & 1; /* Guarded bit */
213 ptem
= pte0
& PTE_PTEM_MASK
;
214 mmask
= PTE_CHECK_MASK
;
215 pp
= pte1
& 0x00000003;
217 if (ptem
== ctx
->ptem
) {
218 if (ctx
->raddr
!= (hwaddr
)-1ULL) {
219 /* all matches should have equal RPN, WIMG & PP */
220 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
221 qemu_log("Bad RPN/WIMG/PP\n");
225 /* Compute access rights */
226 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
227 /* Keep the matching PTE informations */
230 ret
= check_prot(ctx
->prot
, rw
, type
);
233 LOG_MMU("PTE access granted !\n");
235 /* Access right violation */
236 LOG_MMU("PTE access rejected\n");
244 static inline int pte32_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
245 target_ulong pte1
, int h
, int rw
, int type
)
247 return pte_check(ctx
, 0, pte0
, pte1
, h
, rw
, type
);
250 #if defined(TARGET_PPC64)
251 static inline int pte64_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
252 target_ulong pte1
, int h
, int rw
, int type
)
254 return pte_check(ctx
, 1, pte0
, pte1
, h
, rw
, type
);
258 static inline int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
263 /* Update page flags */
264 if (!(*pte1p
& 0x00000100)) {
265 /* Update accessed flag */
266 *pte1p
|= 0x00000100;
269 if (!(*pte1p
& 0x00000080)) {
270 if (rw
== 1 && ret
== 0) {
271 /* Update changed flag */
272 *pte1p
|= 0x00000080;
275 /* Force page fault for first write access */
276 ctx
->prot
&= ~PAGE_WRITE
;
283 /* Software driven TLB helpers */
284 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
285 int way
, int is_code
)
289 /* Select TLB num in a way from address */
290 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
292 nr
+= env
->tlb_per_way
* way
;
293 /* 6xx have separate TLBs for instructions and data */
294 if (is_code
&& env
->id_tlbs
== 1) {
301 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
306 /* LOG_SWTLB("Invalidate all TLBs\n"); */
307 /* Invalidate all defined software TLB */
309 if (env
->id_tlbs
== 1) {
312 for (nr
= 0; nr
< max
; nr
++) {
313 tlb
= &env
->tlb
.tlb6
[nr
];
314 pte_invalidate(&tlb
->pte0
);
319 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
321 int is_code
, int match_epn
)
323 #if !defined(FLUSH_ALL_TLBS)
327 /* Invalidate ITLB + DTLB, all ways */
328 for (way
= 0; way
< env
->nb_ways
; way
++) {
329 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
330 tlb
= &env
->tlb
.tlb6
[nr
];
331 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
332 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
334 pte_invalidate(&tlb
->pte0
);
335 tlb_flush_page(env
, tlb
->EPN
);
339 /* XXX: PowerPC specification say this is valid as well */
340 ppc6xx_tlb_invalidate_all(env
);
344 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
345 target_ulong eaddr
, int is_code
)
347 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
350 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
351 int is_code
, target_ulong pte0
, target_ulong pte1
)
356 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
357 tlb
= &env
->tlb
.tlb6
[nr
];
358 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
359 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
360 /* Invalidate any pending reference in QEMU for this virtual address */
361 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
365 /* Store last way for LRU mechanism */
369 static inline int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
370 target_ulong eaddr
, int rw
, int access_type
)
377 ret
= -1; /* No TLB found */
378 for (way
= 0; way
< env
->nb_ways
; way
++) {
379 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
380 access_type
== ACCESS_CODE
? 1 : 0);
381 tlb
= &env
->tlb
.tlb6
[nr
];
382 /* This test "emulates" the PTE index match for hardware TLBs */
383 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
384 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
385 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
386 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
387 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
390 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
391 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
392 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
393 tlb
->EPN
, eaddr
, tlb
->pte1
,
394 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
395 switch (pte32_check(ctx
, tlb
->pte0
, tlb
->pte1
, 0, rw
, access_type
)) {
397 /* TLB inconsistency */
400 /* Access violation */
410 /* XXX: we should go on looping to check all TLBs consistency
411 * but we can speed-up the whole thing as the
412 * result would be undefined if TLBs are not consistent.
421 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
422 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
423 /* Update page flags */
424 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
430 /* Perform BAT hit & translation */
431 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
432 int *validp
, int *protp
, target_ulong
*BATu
,
438 bl
= (*BATu
& 0x00001FFC) << 15;
441 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
442 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
444 pp
= *BATl
& 0x00000003;
446 prot
= PAGE_READ
| PAGE_EXEC
;
457 static inline void bat_601_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
458 int *validp
, int *protp
,
459 target_ulong
*BATu
, target_ulong
*BATl
)
462 int key
, pp
, valid
, prot
;
464 bl
= (*BATl
& 0x0000003F) << 17;
465 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx
" msk " TARGET_FMT_lx
"\n",
466 (uint8_t)(*BATl
& 0x0000003F), bl
, ~bl
);
468 valid
= (*BATl
>> 6) & 1;
470 pp
= *BATu
& 0x00000003;
472 key
= (*BATu
>> 3) & 1;
474 key
= (*BATu
>> 2) & 1;
476 prot
= pp_check(key
, pp
, 0);
483 static inline int get_bat(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
484 target_ulong
virtual, int rw
, int type
)
486 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
487 target_ulong BEPIl
, BEPIu
, bl
;
491 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
492 type
== ACCESS_CODE
? 'I' : 'D', virtual);
495 BATlt
= env
->IBAT
[1];
496 BATut
= env
->IBAT
[0];
499 BATlt
= env
->DBAT
[1];
500 BATut
= env
->DBAT
[0];
503 for (i
= 0; i
< env
->nb_BATs
; i
++) {
506 BEPIu
= *BATu
& 0xF0000000;
507 BEPIl
= *BATu
& 0x0FFE0000;
508 if (unlikely(env
->mmu_model
== POWERPC_MMU_601
)) {
509 bat_601_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
511 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
513 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
514 " BATl " TARGET_FMT_lx
"\n", __func__
,
515 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
516 if ((virtual & 0xF0000000) == BEPIu
&&
517 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
520 /* Get physical address */
521 ctx
->raddr
= (*BATl
& 0xF0000000) |
522 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
523 (virtual & 0x0001F000);
524 /* Compute access rights */
526 ret
= check_prot(ctx
->prot
, rw
, type
);
528 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
529 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
530 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
537 #if defined(DEBUG_BATS)
538 if (qemu_log_enabled()) {
539 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
540 for (i
= 0; i
< 4; i
++) {
543 BEPIu
= *BATu
& 0xF0000000;
544 BEPIl
= *BATu
& 0x0FFE0000;
545 bl
= (*BATu
& 0x00001FFC) << 15;
546 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
547 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
548 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
549 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
550 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
559 static inline hwaddr
get_pteg_offset(CPUPPCState
*env
,
563 return (hash
* pte_size
* 8) & env
->htab_mask
;
566 /* PTE table lookup */
567 static inline int find_pte2(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int is_64b
, int h
,
568 int rw
, int type
, int target_page_bits
)
571 target_ulong pte0
, pte1
;
575 ret
= -1; /* No entry found */
576 pteg_off
= get_pteg_offset(env
, ctx
->hash
[h
],
577 is_64b
? HASH_PTE_SIZE_64
: HASH_PTE_SIZE_32
);
578 for (i
= 0; i
< 8; i
++) {
579 #if defined(TARGET_PPC64)
581 if (env
->external_htab
) {
582 pte0
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16));
583 pte1
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16) + 8);
585 pte0
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16));
586 pte1
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16) + 8);
589 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
590 LOG_MMU("Load pte from %016" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
591 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
592 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
593 (int)((pte0
>> 1) & 1), ctx
->ptem
);
597 if (env
->external_htab
) {
598 pte0
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8));
599 pte1
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8) + 4);
601 pte0
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8));
602 pte1
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8) + 4);
604 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
605 LOG_MMU("Load pte from %08" HWADDR_PRIx
" => " TARGET_FMT_lx
" "
606 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
607 pteg_off
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
608 (int)((pte0
>> 6) & 1), ctx
->ptem
);
612 /* PTE inconsistency */
615 /* Access violation */
625 /* XXX: we should go on looping to check all PTEs consistency
626 * but if we can speed-up the whole thing as the
627 * result would be undefined if PTEs are not consistent.
636 LOG_MMU("found PTE at addr %08" HWADDR_PRIx
" prot=%01x ret=%d\n",
637 ctx
->raddr
, ctx
->prot
, ret
);
638 /* Update page flags */
640 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
641 #if defined(TARGET_PPC64)
643 if (env
->external_htab
) {
644 stq_p(env
->external_htab
+ pteg_off
+ (good
* 16) + 8,
647 stq_phys_notdirty(env
->htab_base
+ pteg_off
+
648 (good
* 16) + 8, pte1
);
653 if (env
->external_htab
) {
654 stl_p(env
->external_htab
+ pteg_off
+ (good
* 8) + 4,
657 stl_phys_notdirty(env
->htab_base
+ pteg_off
+
658 (good
* 8) + 4, pte1
);
664 /* We have a TLB that saves 4K pages, so let's
665 * split a huge page to 4k chunks */
666 if (target_page_bits
!= TARGET_PAGE_BITS
) {
667 ctx
->raddr
|= (ctx
->eaddr
& ((1 << target_page_bits
) - 1))
673 static inline int find_pte(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
674 int type
, int target_page_bits
)
676 #if defined(TARGET_PPC64)
677 if (env
->mmu_model
& POWERPC_MMU_64
) {
678 return find_pte2(env
, ctx
, 1, h
, rw
, type
, target_page_bits
);
682 return find_pte2(env
, ctx
, 0, h
, rw
, type
, target_page_bits
);
685 #if defined(TARGET_PPC64)
686 static inline ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
688 uint64_t esid_256M
, esid_1T
;
691 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
693 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
694 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
696 for (n
= 0; n
< env
->slb_nr
; n
++) {
697 ppc_slb_t
*slb
= &env
->slb
[n
];
699 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
700 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
701 /* We check for 1T matches on all MMUs here - if the MMU
702 * doesn't have 1T segment support, we will have prevented 1T
703 * entries from being inserted in the slbmte code. */
704 if (((slb
->esid
== esid_256M
) &&
705 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
706 || ((slb
->esid
== esid_1T
) &&
707 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
715 /*****************************************************************************/
718 void helper_slbia(CPUPPCState
*env
)
720 int n
, do_invalidate
;
723 /* XXX: Warning: slbia never invalidates the first segment */
724 for (n
= 1; n
< env
->slb_nr
; n
++) {
725 ppc_slb_t
*slb
= &env
->slb
[n
];
727 if (slb
->esid
& SLB_ESID_V
) {
728 slb
->esid
&= ~SLB_ESID_V
;
729 /* XXX: given the fact that segment size is 256 MB or 1TB,
730 * and we still don't have a tlb_flush_mask(env, n, mask)
731 * in QEMU, we just invalidate all TLBs
741 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
745 slb
= slb_lookup(env
, addr
);
750 if (slb
->esid
& SLB_ESID_V
) {
751 slb
->esid
&= ~SLB_ESID_V
;
753 /* XXX: given the fact that segment size is 256 MB or 1TB,
754 * and we still don't have a tlb_flush_mask(env, n, mask)
755 * in QEMU, we just invalidate all TLBs
761 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
763 int slot
= rb
& 0xfff;
764 ppc_slb_t
*slb
= &env
->slb
[slot
];
766 if (rb
& (0x1000 - env
->slb_nr
)) {
767 return -1; /* Reserved bits set or slot too high */
769 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
770 return -1; /* Bad segment size */
772 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
773 return -1; /* 1T segment on MMU that doesn't support it */
776 /* Mask out the slot number as we store the entry */
777 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
780 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
781 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
782 slb
->esid
, slb
->vsid
);
787 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
790 int slot
= rb
& 0xfff;
791 ppc_slb_t
*slb
= &env
->slb
[slot
];
793 if (slot
>= env
->slb_nr
) {
801 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
804 int slot
= rb
& 0xfff;
805 ppc_slb_t
*slb
= &env
->slb
[slot
];
807 if (slot
>= env
->slb_nr
) {
814 #endif /* defined(TARGET_PPC64) */
816 /* Perform segment based translation */
817 static inline int get_segment(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
818 target_ulong eaddr
, int rw
, int type
)
822 int ds
, pr
, target_page_bits
;
827 #if defined(TARGET_PPC64)
828 if (env
->mmu_model
& POWERPC_MMU_64
) {
830 target_ulong pageaddr
;
833 LOG_MMU("Check SLBs\n");
834 slb
= slb_lookup(env
, eaddr
);
839 if (slb
->vsid
& SLB_VSID_B
) {
840 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
843 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
847 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
848 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
849 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
850 : (slb
->vsid
& SLB_VSID_KS
));
852 ctx
->nx
= !!(slb
->vsid
& SLB_VSID_N
);
854 pageaddr
= eaddr
& ((1ULL << segment_bits
)
855 - (1ULL << target_page_bits
));
856 if (slb
->vsid
& SLB_VSID_B
) {
857 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
859 hash
= vsid
^ (pageaddr
>> target_page_bits
);
861 /* Only 5 bits of the page index are used in the AVPN */
862 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
863 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
865 #endif /* defined(TARGET_PPC64) */
867 target_ulong sr
, pgidx
;
869 sr
= env
->sr
[eaddr
>> 28];
870 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
871 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
872 ds
= sr
& 0x80000000 ? 1 : 0;
873 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
874 vsid
= sr
& 0x00FFFFFF;
875 target_page_bits
= TARGET_PAGE_BITS
;
876 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
877 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
878 " ir=%d dr=%d pr=%d %d t=%d\n",
879 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
880 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
881 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
883 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
885 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
886 ctx
->key
, ds
, ctx
->nx
, vsid
);
889 /* Check if instruction fetch is allowed, if needed */
890 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
891 /* Page address translation */
892 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
893 " hash " TARGET_FMT_plx
"\n",
894 env
->htab_base
, env
->htab_mask
, hash
);
896 ctx
->hash
[1] = ~hash
;
898 /* Initialize real address with an invalid value */
899 ctx
->raddr
= (hwaddr
)-1ULL;
900 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
901 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
902 /* Software TLB search */
903 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
905 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
906 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
907 " hash=" TARGET_FMT_plx
"\n",
908 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
910 /* Primary table lookup */
911 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
913 /* Secondary table lookup */
914 if (eaddr
!= 0xEFFFFFFF) {
915 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
916 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
917 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
918 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
920 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
927 #if defined(DUMP_PAGE_TABLES)
928 if (qemu_log_enabled()) {
930 uint32_t a0
, a1
, a2
, a3
;
932 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
933 "\n", sdr
, mask
+ 0x80);
934 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
936 a0
= ldl_phys(curaddr
);
937 a1
= ldl_phys(curaddr
+ 4);
938 a2
= ldl_phys(curaddr
+ 8);
939 a3
= ldl_phys(curaddr
+ 12);
940 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
941 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
942 curaddr
, a0
, a1
, a2
, a3
);
948 LOG_MMU("No access allowed\n");
954 LOG_MMU("direct store...\n");
955 /* Direct-store segment : absolutely *BUGGY* for now */
957 /* Direct-store implies a 32-bit MMU.
958 * Check the Segment Register's bus unit ID (BUID).
960 sr
= env
->sr
[eaddr
>> 28];
961 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
962 /* Memory-forced I/O controller interface access */
963 /* If T=1 and BUID=x'07F', the 601 performs a memory access
964 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
966 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
967 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
973 /* Integer load/store : only access allowed */
976 /* No code fetch is allowed in direct-store areas */
979 /* Floating point load/store */
982 /* lwarx, ldarx or srwcx. */
985 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
986 /* Should make the instruction do no-op.
987 * As it already do no-op, it's quite easy :-)
995 qemu_log("ERROR: instruction should not need "
996 "address translation\n");
999 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1010 /* Generic TLB check function for embedded PowerPC implementations */
1011 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1013 target_ulong address
, uint32_t pid
, int ext
,
1018 /* Check valid flag */
1019 if (!(tlb
->prot
& PAGE_VALID
)) {
1022 mask
= ~(tlb
->size
- 1);
1023 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1024 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1025 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
1027 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
1030 /* Check effective address */
1031 if ((address
& mask
) != tlb
->EPN
) {
1034 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1036 /* Extend the physical address to 36 bits */
1037 *raddrp
|= (uint64_t)(tlb
->RPN
& 0xF) << 32;
1043 /* Generic TLB search function for PowerPC embedded implementations */
1044 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
1051 /* Default return value is no match */
1053 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1054 tlb
= &env
->tlb
.tlbe
[i
];
1055 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1064 /* Helpers specific to PowerPC 40x implementations */
1065 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
1070 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1071 tlb
= &env
->tlb
.tlbe
[i
];
1072 tlb
->prot
&= ~PAGE_VALID
;
1077 static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState
*env
,
1078 target_ulong eaddr
, uint32_t pid
)
1080 #if !defined(FLUSH_ALL_TLBS)
1083 target_ulong page
, end
;
1086 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1087 tlb
= &env
->tlb
.tlbe
[i
];
1088 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1089 end
= tlb
->EPN
+ tlb
->size
;
1090 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
1091 tlb_flush_page(env
, page
);
1093 tlb
->prot
&= ~PAGE_VALID
;
1098 ppc4xx_tlb_invalidate_all(env
);
1102 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1103 target_ulong address
, int rw
,
1108 int i
, ret
, zsel
, zpr
, pr
;
1111 raddr
= (hwaddr
)-1ULL;
1113 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1114 tlb
= &env
->tlb
.tlbe
[i
];
1115 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1116 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
1119 zsel
= (tlb
->attr
>> 4) & 0xF;
1120 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
1121 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1122 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1123 /* Check execute enable bit */
1131 /* All accesses granted */
1132 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1137 /* Raise Zone protection fault. */
1138 env
->spr
[SPR_40x_ESR
] = 1 << 22;
1146 /* Check from TLB entry */
1147 ctx
->prot
= tlb
->prot
;
1148 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1150 env
->spr
[SPR_40x_ESR
] = 0;
1156 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1157 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1162 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1163 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1168 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
1170 /* XXX: TO BE FIXED */
1171 if (val
!= 0x00000000) {
1172 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1174 env
->spr
[SPR_405_SLER
] = val
;
1177 static inline int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1178 hwaddr
*raddr
, int *prot
,
1179 target_ulong address
, int rw
,
1180 int access_type
, int i
)
1184 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1185 env
->spr
[SPR_BOOKE_PID
],
1186 !env
->nb_pids
, i
) >= 0) {
1190 if (env
->spr
[SPR_BOOKE_PID1
] &&
1191 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1192 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
1196 if (env
->spr
[SPR_BOOKE_PID2
] &&
1197 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1198 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
1202 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1208 prot2
= tlb
->prot
& 0xF;
1210 prot2
= (tlb
->prot
>> 4) & 0xF;
1213 /* Check the address space */
1214 if (access_type
== ACCESS_CODE
) {
1215 if (msr_ir
!= (tlb
->attr
& 1)) {
1216 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1221 if (prot2
& PAGE_EXEC
) {
1222 LOG_SWTLB("%s: good TLB!\n", __func__
);
1226 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1229 if (msr_dr
!= (tlb
->attr
& 1)) {
1230 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1235 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1236 LOG_SWTLB("%s: found TLB!\n", __func__
);
1240 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1247 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1248 target_ulong address
, int rw
,
1256 raddr
= (hwaddr
)-1ULL;
1257 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1258 tlb
= &env
->tlb
.tlbe
[i
];
1259 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
1268 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1269 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1272 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1273 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1279 static void booke206_flush_tlb(CPUPPCState
*env
, int flags
,
1280 const int check_iprot
)
1284 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
1286 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1287 if (flags
& (1 << i
)) {
1288 tlb_size
= booke206_tlb_size(env
, i
);
1289 for (j
= 0; j
< tlb_size
; j
++) {
1290 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
1291 tlb
[j
].mas1
&= ~MAS1_VALID
;
1295 tlb
+= booke206_tlb_size(env
, i
);
1301 static hwaddr
booke206_tlb_to_page_size(CPUPPCState
*env
,
1306 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1308 return 1024ULL << tlbm_size
;
1311 /* TLB check function for MAS based SoftTLBs */
1312 int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1314 target_ulong address
, uint32_t pid
)
1319 /* Check valid flag */
1320 if (!(tlb
->mas1
& MAS1_VALID
)) {
1324 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
1325 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
1326 PRIx64
" mask=0x" TARGET_FMT_lx
" MAS7_3=0x%" PRIx64
" MAS8=%x\n",
1327 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
, tlb
->mas7_3
,
1331 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
1332 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
1336 /* Check effective address */
1337 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
1342 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
1348 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1349 hwaddr
*raddr
, int *prot
,
1350 target_ulong address
, int rw
,
1356 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1357 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1361 if (env
->spr
[SPR_BOOKE_PID1
] &&
1362 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1363 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1367 if (env
->spr
[SPR_BOOKE_PID2
] &&
1368 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1369 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1373 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1379 if (tlb
->mas7_3
& MAS3_UR
) {
1382 if (tlb
->mas7_3
& MAS3_UW
) {
1383 prot2
|= PAGE_WRITE
;
1385 if (tlb
->mas7_3
& MAS3_UX
) {
1389 if (tlb
->mas7_3
& MAS3_SR
) {
1392 if (tlb
->mas7_3
& MAS3_SW
) {
1393 prot2
|= PAGE_WRITE
;
1395 if (tlb
->mas7_3
& MAS3_SX
) {
1400 /* Check the address space and permissions */
1401 if (access_type
== ACCESS_CODE
) {
1402 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1403 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1408 if (prot2
& PAGE_EXEC
) {
1409 LOG_SWTLB("%s: good TLB!\n", __func__
);
1413 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1416 if (msr_dr
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1417 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1422 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1423 LOG_SWTLB("%s: found TLB!\n", __func__
);
1427 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1434 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1435 target_ulong address
, int rw
,
1443 raddr
= (hwaddr
)-1ULL;
1445 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1446 int ways
= booke206_tlb_ways(env
, i
);
1448 for (j
= 0; j
< ways
; j
++) {
1449 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1453 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1465 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1466 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1469 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1470 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1476 static const char *book3e_tsize_to_str
[32] = {
1477 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1478 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1479 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
1483 static void mmubooke_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1486 ppcemb_tlb_t
*entry
;
1489 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1490 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1494 cpu_fprintf(f
, "\nTLB:\n");
1495 cpu_fprintf(f
, "Effective Physical Size PID Prot "
1498 entry
= &env
->tlb
.tlbe
[0];
1499 for (i
= 0; i
< env
->nb_tlb
; i
++, entry
++) {
1502 uint64_t size
= (uint64_t)entry
->size
;
1505 /* Check valid flag */
1506 if (!(entry
->prot
& PAGE_VALID
)) {
1510 mask
= ~(entry
->size
- 1);
1511 ea
= entry
->EPN
& mask
;
1512 pa
= entry
->RPN
& mask
;
1513 /* Extend the physical address to 36 bits */
1514 pa
|= (hwaddr
)(entry
->RPN
& 0xF) << 32;
1517 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"M", size
/ 1024);
1519 snprintf(size_buf
, sizeof(size_buf
), "%3" PRId64
"k", size
);
1521 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %s %-5u %08x %08x\n",
1522 (uint64_t)ea
, (uint64_t)pa
, size_buf
, (uint32_t)entry
->PID
,
1523 entry
->prot
, entry
->attr
);
1528 static void mmubooke206_dump_one_tlb(FILE *f
, fprintf_function cpu_fprintf
,
1529 CPUPPCState
*env
, int tlbn
, int offset
,
1532 ppcmas_tlb_t
*entry
;
1535 cpu_fprintf(f
, "\nTLB%d:\n", tlbn
);
1536 cpu_fprintf(f
, "Effective Physical Size TID TS SRWX"
1537 " URWX WIMGE U0123\n");
1539 entry
= &env
->tlb
.tlbm
[offset
];
1540 for (i
= 0; i
< tlbsize
; i
++, entry
++) {
1541 hwaddr ea
, pa
, size
;
1544 if (!(entry
->mas1
& MAS1_VALID
)) {
1548 tsize
= (entry
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1549 size
= 1024ULL << tsize
;
1550 ea
= entry
->mas2
& ~(size
- 1);
1551 pa
= entry
->mas7_3
& ~(size
- 1);
1553 cpu_fprintf(f
, "0x%016" PRIx64
" 0x%016" PRIx64
" %4s %-5u %1u S%c%c%c"
1554 "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
1555 (uint64_t)ea
, (uint64_t)pa
,
1556 book3e_tsize_to_str
[tsize
],
1557 (entry
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
,
1558 (entry
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
,
1559 entry
->mas7_3
& MAS3_SR
? 'R' : '-',
1560 entry
->mas7_3
& MAS3_SW
? 'W' : '-',
1561 entry
->mas7_3
& MAS3_SX
? 'X' : '-',
1562 entry
->mas7_3
& MAS3_UR
? 'R' : '-',
1563 entry
->mas7_3
& MAS3_UW
? 'W' : '-',
1564 entry
->mas7_3
& MAS3_UX
? 'X' : '-',
1565 entry
->mas2
& MAS2_W
? 'W' : '-',
1566 entry
->mas2
& MAS2_I
? 'I' : '-',
1567 entry
->mas2
& MAS2_M
? 'M' : '-',
1568 entry
->mas2
& MAS2_G
? 'G' : '-',
1569 entry
->mas2
& MAS2_E
? 'E' : '-',
1570 entry
->mas7_3
& MAS3_U0
? '0' : '-',
1571 entry
->mas7_3
& MAS3_U1
? '1' : '-',
1572 entry
->mas7_3
& MAS3_U2
? '2' : '-',
1573 entry
->mas7_3
& MAS3_U3
? '3' : '-');
1577 static void mmubooke206_dump_mmu(FILE *f
, fprintf_function cpu_fprintf
,
1583 if (kvm_enabled() && !env
->kvm_sw_tlb
) {
1584 cpu_fprintf(f
, "Cannot access KVM TLB\n");
1588 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1589 int size
= booke206_tlb_size(env
, i
);
1595 mmubooke206_dump_one_tlb(f
, cpu_fprintf
, env
, i
, offset
, size
);
#if defined(TARGET_PPC64)
/* Dump the 64-bit hash MMU SLB (non-empty entries only). */
static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                              CPUPPCState *env)
{
    int i;
    uint64_t slbe, slbv;

    /* Make sure the SLB content is up to date when running under KVM */
    cpu_synchronize_state(env);

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}
#endif
1622 void dump_mmu(FILE *f
, fprintf_function cpu_fprintf
, CPUPPCState
*env
)
1624 switch (env
->mmu_model
) {
1625 case POWERPC_MMU_BOOKE
:
1626 mmubooke_dump_mmu(f
, cpu_fprintf
, env
);
1628 case POWERPC_MMU_BOOKE206
:
1629 mmubooke206_dump_mmu(f
, cpu_fprintf
, env
);
1631 #if defined(TARGET_PPC64)
1632 case POWERPC_MMU_64B
:
1633 case POWERPC_MMU_2_06
:
1634 case POWERPC_MMU_2_06d
:
1635 mmubooks_dump_mmu(f
, cpu_fprintf
, env
);
1639 qemu_log_mask(LOG_UNIMP
, "%s: unimplemented\n", __func__
);
1643 static inline int check_physical(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1644 target_ulong eaddr
, int rw
)
1649 ctx
->prot
= PAGE_READ
| PAGE_EXEC
;
1651 switch (env
->mmu_model
) {
1652 case POWERPC_MMU_32B
:
1653 case POWERPC_MMU_601
:
1654 case POWERPC_MMU_SOFT_6xx
:
1655 case POWERPC_MMU_SOFT_74xx
:
1656 case POWERPC_MMU_SOFT_4xx
:
1657 case POWERPC_MMU_REAL
:
1658 case POWERPC_MMU_BOOKE
:
1659 ctx
->prot
|= PAGE_WRITE
;
1661 #if defined(TARGET_PPC64)
1662 case POWERPC_MMU_64B
:
1663 case POWERPC_MMU_2_06
:
1664 case POWERPC_MMU_2_06d
:
1665 /* Real address are 60 bits long */
1666 ctx
->raddr
&= 0x0FFFFFFFFFFFFFFFULL
;
1667 ctx
->prot
|= PAGE_WRITE
;
1670 case POWERPC_MMU_SOFT_4xx_Z
:
1671 if (unlikely(msr_pe
!= 0)) {
1672 /* 403 family add some particular protections,
1673 * using PBL/PBU registers for accesses with no translation.
1676 /* Check PLB validity */
1677 (env
->pb
[0] < env
->pb
[1] &&
1678 /* and address in plb area */
1679 eaddr
>= env
->pb
[0] && eaddr
< env
->pb
[1]) ||
1680 (env
->pb
[2] < env
->pb
[3] &&
1681 eaddr
>= env
->pb
[2] && eaddr
< env
->pb
[3]) ? 1 : 0;
1682 if (in_plb
^ msr_px
) {
1683 /* Access in protected area */
1685 /* Access is not allowed */
1689 /* Read-write access is allowed */
1690 ctx
->prot
|= PAGE_WRITE
;
1694 case POWERPC_MMU_MPC8xx
:
1696 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1698 case POWERPC_MMU_BOOKE206
:
1699 cpu_abort(env
, "BookE 2.06 MMU doesn't have physical real mode\n");
1702 cpu_abort(env
, "Unknown or invalid MMU model\n");
1709 static int get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1710 target_ulong eaddr
, int rw
, int access_type
)
1715 qemu_log("%s\n", __func__
);
1717 if ((access_type
== ACCESS_CODE
&& msr_ir
== 0) ||
1718 (access_type
!= ACCESS_CODE
&& msr_dr
== 0)) {
1719 if (env
->mmu_model
== POWERPC_MMU_BOOKE
) {
1720 /* The BookE MMU always performs address translation. The
1721 IS and DS bits only affect the address space. */
1722 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1724 } else if (env
->mmu_model
== POWERPC_MMU_BOOKE206
) {
1725 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1728 /* No address translation. */
1729 ret
= check_physical(env
, ctx
, eaddr
, rw
);
1733 switch (env
->mmu_model
) {
1734 case POWERPC_MMU_32B
:
1735 case POWERPC_MMU_601
:
1736 case POWERPC_MMU_SOFT_6xx
:
1737 case POWERPC_MMU_SOFT_74xx
:
1738 /* Try to find a BAT */
1739 if (env
->nb_BATs
!= 0) {
1740 ret
= get_bat(env
, ctx
, eaddr
, rw
, access_type
);
1742 #if defined(TARGET_PPC64)
1743 case POWERPC_MMU_64B
:
1744 case POWERPC_MMU_2_06
:
1745 case POWERPC_MMU_2_06d
:
1748 /* We didn't match any BAT entry or don't have BATs */
1749 ret
= get_segment(env
, ctx
, eaddr
, rw
, access_type
);
1752 case POWERPC_MMU_SOFT_4xx
:
1753 case POWERPC_MMU_SOFT_4xx_Z
:
1754 ret
= mmu40x_get_physical_address(env
, ctx
, eaddr
,
1757 case POWERPC_MMU_BOOKE
:
1758 ret
= mmubooke_get_physical_address(env
, ctx
, eaddr
,
1761 case POWERPC_MMU_BOOKE206
:
1762 ret
= mmubooke206_get_physical_address(env
, ctx
, eaddr
, rw
,
1765 case POWERPC_MMU_MPC8xx
:
1767 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1769 case POWERPC_MMU_REAL
:
1770 cpu_abort(env
, "PowerPC in real mode do not do any translation\n");
1773 cpu_abort(env
, "Unknown or invalid MMU model\n");
1778 qemu_log("%s address " TARGET_FMT_lx
" => %d " TARGET_FMT_plx
"\n",
1779 __func__
, eaddr
, ret
, ctx
->raddr
);
1785 hwaddr
cpu_get_phys_page_debug(CPUPPCState
*env
, target_ulong addr
)
1789 if (unlikely(get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) != 0)) {
1793 return ctx
.raddr
& TARGET_PAGE_MASK
;
1796 static void booke206_update_mas_tlb_miss(CPUPPCState
*env
, target_ulong address
,
1799 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
1800 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
1801 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
1802 env
->spr
[SPR_BOOKE_MAS3
] = 0;
1803 env
->spr
[SPR_BOOKE_MAS6
] = 0;
1804 env
->spr
[SPR_BOOKE_MAS7
] = 0;
1807 if (((rw
== 2) && msr_ir
) || ((rw
!= 2) && msr_dr
)) {
1808 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
1809 env
->spr
[SPR_BOOKE_MAS6
] |= MAS6_SAS
;
1812 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_VALID
;
1813 env
->spr
[SPR_BOOKE_MAS2
] |= address
& MAS2_EPN_MASK
;
1815 switch (env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TIDSELD_PIDZ
) {
1816 case MAS4_TIDSELD_PID0
:
1817 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID
] << MAS1_TID_SHIFT
;
1819 case MAS4_TIDSELD_PID1
:
1820 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID1
] << MAS1_TID_SHIFT
;
1822 case MAS4_TIDSELD_PID2
:
1823 env
->spr
[SPR_BOOKE_MAS1
] |= env
->spr
[SPR_BOOKE_PID2
] << MAS1_TID_SHIFT
;
1827 env
->spr
[SPR_BOOKE_MAS6
] |= env
->spr
[SPR_BOOKE_PID
] << 16;
1829 /* next victim logic */
1830 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
1832 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
1833 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
1836 /* Perform address translation */
1837 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
1847 access_type
= ACCESS_CODE
;
1850 access_type
= env
->access_type
;
1852 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1854 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1855 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1856 mmu_idx
, TARGET_PAGE_SIZE
);
1858 } else if (ret
< 0) {
1860 if (access_type
== ACCESS_CODE
) {
1863 /* No matches in page tables or TLB */
1864 switch (env
->mmu_model
) {
1865 case POWERPC_MMU_SOFT_6xx
:
1866 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1867 env
->error_code
= 1 << 18;
1868 env
->spr
[SPR_IMISS
] = address
;
1869 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1871 case POWERPC_MMU_SOFT_74xx
:
1872 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1874 case POWERPC_MMU_SOFT_4xx
:
1875 case POWERPC_MMU_SOFT_4xx_Z
:
1876 env
->exception_index
= POWERPC_EXCP_ITLB
;
1877 env
->error_code
= 0;
1878 env
->spr
[SPR_40x_DEAR
] = address
;
1879 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1881 case POWERPC_MMU_32B
:
1882 case POWERPC_MMU_601
:
1883 #if defined(TARGET_PPC64)
1884 case POWERPC_MMU_64B
:
1885 case POWERPC_MMU_2_06
:
1886 case POWERPC_MMU_2_06d
:
1888 env
->exception_index
= POWERPC_EXCP_ISI
;
1889 env
->error_code
= 0x40000000;
1891 case POWERPC_MMU_BOOKE206
:
1892 booke206_update_mas_tlb_miss(env
, address
, rw
);
1894 case POWERPC_MMU_BOOKE
:
1895 env
->exception_index
= POWERPC_EXCP_ITLB
;
1896 env
->error_code
= 0;
1897 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1899 case POWERPC_MMU_MPC8xx
:
1901 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1903 case POWERPC_MMU_REAL
:
1904 cpu_abort(env
, "PowerPC in real mode should never raise "
1905 "any MMU exceptions\n");
1908 cpu_abort(env
, "Unknown or invalid MMU model\n");
1913 /* Access rights violation */
1914 env
->exception_index
= POWERPC_EXCP_ISI
;
1915 env
->error_code
= 0x08000000;
1918 /* No execute protection violation */
1919 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1920 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1921 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1923 env
->exception_index
= POWERPC_EXCP_ISI
;
1924 env
->error_code
= 0x10000000;
1927 /* Direct store exception */
1928 /* No code fetch is allowed in direct-store areas */
1929 env
->exception_index
= POWERPC_EXCP_ISI
;
1930 env
->error_code
= 0x10000000;
1932 #if defined(TARGET_PPC64)
1934 /* No match in segment table */
1935 env
->exception_index
= POWERPC_EXCP_ISEG
;
1936 env
->error_code
= 0;
1943 /* No matches in page tables or TLB */
1944 switch (env
->mmu_model
) {
1945 case POWERPC_MMU_SOFT_6xx
:
1947 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1948 env
->error_code
= 1 << 16;
1950 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1951 env
->error_code
= 0;
1953 env
->spr
[SPR_DMISS
] = address
;
1954 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1956 env
->error_code
|= ctx
.key
<< 19;
1957 env
->spr
[SPR_HASH1
] = env
->htab_base
+
1958 get_pteg_offset(env
, ctx
.hash
[0], HASH_PTE_SIZE_32
);
1959 env
->spr
[SPR_HASH2
] = env
->htab_base
+
1960 get_pteg_offset(env
, ctx
.hash
[1], HASH_PTE_SIZE_32
);
1962 case POWERPC_MMU_SOFT_74xx
:
1964 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1966 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1969 /* Implement LRU algorithm */
1970 env
->error_code
= ctx
.key
<< 19;
1971 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1972 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1973 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1975 case POWERPC_MMU_SOFT_4xx
:
1976 case POWERPC_MMU_SOFT_4xx_Z
:
1977 env
->exception_index
= POWERPC_EXCP_DTLB
;
1978 env
->error_code
= 0;
1979 env
->spr
[SPR_40x_DEAR
] = address
;
1981 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1983 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1986 case POWERPC_MMU_32B
:
1987 case POWERPC_MMU_601
:
1988 #if defined(TARGET_PPC64)
1989 case POWERPC_MMU_64B
:
1990 case POWERPC_MMU_2_06
:
1991 case POWERPC_MMU_2_06d
:
1993 env
->exception_index
= POWERPC_EXCP_DSI
;
1994 env
->error_code
= 0;
1995 env
->spr
[SPR_DAR
] = address
;
1997 env
->spr
[SPR_DSISR
] = 0x42000000;
1999 env
->spr
[SPR_DSISR
] = 0x40000000;
2002 case POWERPC_MMU_MPC8xx
:
2004 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2006 case POWERPC_MMU_BOOKE206
:
2007 booke206_update_mas_tlb_miss(env
, address
, rw
);
2009 case POWERPC_MMU_BOOKE
:
2010 env
->exception_index
= POWERPC_EXCP_DTLB
;
2011 env
->error_code
= 0;
2012 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2013 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2015 case POWERPC_MMU_REAL
:
2016 cpu_abort(env
, "PowerPC in real mode should never raise "
2017 "any MMU exceptions\n");
2020 cpu_abort(env
, "Unknown or invalid MMU model\n");
2025 /* Access rights violation */
2026 env
->exception_index
= POWERPC_EXCP_DSI
;
2027 env
->error_code
= 0;
2028 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
2029 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
2030 env
->spr
[SPR_40x_DEAR
] = address
;
2032 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
2034 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
2035 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
2036 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2037 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2039 env
->spr
[SPR_DAR
] = address
;
2041 env
->spr
[SPR_DSISR
] = 0x0A000000;
2043 env
->spr
[SPR_DSISR
] = 0x08000000;
2048 /* Direct store exception */
2049 switch (access_type
) {
2051 /* Floating point load/store */
2052 env
->exception_index
= POWERPC_EXCP_ALIGN
;
2053 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
2054 env
->spr
[SPR_DAR
] = address
;
2057 /* lwarx, ldarx or stwcx. */
2058 env
->exception_index
= POWERPC_EXCP_DSI
;
2059 env
->error_code
= 0;
2060 env
->spr
[SPR_DAR
] = address
;
2062 env
->spr
[SPR_DSISR
] = 0x06000000;
2064 env
->spr
[SPR_DSISR
] = 0x04000000;
2068 /* eciwx or ecowx */
2069 env
->exception_index
= POWERPC_EXCP_DSI
;
2070 env
->error_code
= 0;
2071 env
->spr
[SPR_DAR
] = address
;
2073 env
->spr
[SPR_DSISR
] = 0x06100000;
2075 env
->spr
[SPR_DSISR
] = 0x04100000;
2079 printf("DSI: invalid exception (%d)\n", ret
);
2080 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
2082 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
2083 env
->spr
[SPR_DAR
] = address
;
2087 #if defined(TARGET_PPC64)
2089 /* No match in segment table */
2090 env
->exception_index
= POWERPC_EXCP_DSEG
;
2091 env
->error_code
= 0;
2092 env
->spr
[SPR_DAR
] = address
;
2098 printf("%s: set exception to %d %02x\n", __func__
,
2099 env
->exception
, env
->error_code
);
2107 /*****************************************************************************/
2108 /* BATs management */
2109 #if !defined(FLUSH_ALL_TLBS)
2110 static inline void do_invalidate_BAT(CPUPPCState
*env
, target_ulong BATu
,
2113 target_ulong base
, end
, page
;
2115 base
= BATu
& ~0x0001FFFF;
2116 end
= base
+ mask
+ 0x00020000;
2117 LOG_BATS("Flush BAT from " TARGET_FMT_lx
" to " TARGET_FMT_lx
" ("
2118 TARGET_FMT_lx
")\n", base
, end
, mask
);
2119 for (page
= base
; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2120 tlb_flush_page(env
, page
);
2122 LOG_BATS("Flush done\n");
2126 static inline void dump_store_bat(CPUPPCState
*env
, char ID
, int ul
, int nr
,
2129 LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n", ID
,
2130 nr
, ul
== 0 ? 'u' : 'l', value
, env
->nip
);
2133 void helper_store_ibatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2137 dump_store_bat(env
, 'I', 0, nr
, value
);
2138 if (env
->IBAT
[0][nr
] != value
) {
2139 mask
= (value
<< 15) & 0x0FFE0000UL
;
2140 #if !defined(FLUSH_ALL_TLBS)
2141 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2143 /* When storing valid upper BAT, mask BEPI and BRPN
2144 * and invalidate all TLBs covered by this BAT
2146 mask
= (value
<< 15) & 0x0FFE0000UL
;
2147 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2148 (value
& ~0x0001FFFFUL
& ~mask
);
2149 env
->IBAT
[1][nr
] = (env
->IBAT
[1][nr
] & 0x0000007B) |
2150 (env
->IBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2151 #if !defined(FLUSH_ALL_TLBS)
2152 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2159 void helper_store_ibatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2161 dump_store_bat(env
, 'I', 1, nr
, value
);
2162 env
->IBAT
[1][nr
] = value
;
2165 void helper_store_dbatu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2169 dump_store_bat(env
, 'D', 0, nr
, value
);
2170 if (env
->DBAT
[0][nr
] != value
) {
2171 /* When storing valid upper BAT, mask BEPI and BRPN
2172 * and invalidate all TLBs covered by this BAT
2174 mask
= (value
<< 15) & 0x0FFE0000UL
;
2175 #if !defined(FLUSH_ALL_TLBS)
2176 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2178 mask
= (value
<< 15) & 0x0FFE0000UL
;
2179 env
->DBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2180 (value
& ~0x0001FFFFUL
& ~mask
);
2181 env
->DBAT
[1][nr
] = (env
->DBAT
[1][nr
] & 0x0000007B) |
2182 (env
->DBAT
[1][nr
] & ~0x0001FFFF & ~mask
);
2183 #if !defined(FLUSH_ALL_TLBS)
2184 do_invalidate_BAT(env
, env
->DBAT
[0][nr
], mask
);
2191 void helper_store_dbatl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2193 dump_store_bat(env
, 'D', 1, nr
, value
);
2194 env
->DBAT
[1][nr
] = value
;
2197 void helper_store_601_batu(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2200 #if defined(FLUSH_ALL_TLBS)
2204 dump_store_bat(env
, 'I', 0, nr
, value
);
2205 if (env
->IBAT
[0][nr
] != value
) {
2206 #if defined(FLUSH_ALL_TLBS)
2209 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2210 if (env
->IBAT
[1][nr
] & 0x40) {
2211 /* Invalidate BAT only if it is valid */
2212 #if !defined(FLUSH_ALL_TLBS)
2213 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2218 /* When storing valid upper BAT, mask BEPI and BRPN
2219 * and invalidate all TLBs covered by this BAT
2221 env
->IBAT
[0][nr
] = (value
& 0x00001FFFUL
) |
2222 (value
& ~0x0001FFFFUL
& ~mask
);
2223 env
->DBAT
[0][nr
] = env
->IBAT
[0][nr
];
2224 if (env
->IBAT
[1][nr
] & 0x40) {
2225 #if !defined(FLUSH_ALL_TLBS)
2226 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2231 #if defined(FLUSH_ALL_TLBS)
2239 void helper_store_601_batl(CPUPPCState
*env
, uint32_t nr
, target_ulong value
)
2241 #if !defined(FLUSH_ALL_TLBS)
2247 dump_store_bat(env
, 'I', 1, nr
, value
);
2248 if (env
->IBAT
[1][nr
] != value
) {
2249 #if defined(FLUSH_ALL_TLBS)
2252 if (env
->IBAT
[1][nr
] & 0x40) {
2253 #if !defined(FLUSH_ALL_TLBS)
2254 mask
= (env
->IBAT
[1][nr
] << 17) & 0x0FFE0000UL
;
2255 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2261 #if !defined(FLUSH_ALL_TLBS)
2262 mask
= (value
<< 17) & 0x0FFE0000UL
;
2263 do_invalidate_BAT(env
, env
->IBAT
[0][nr
], mask
);
2268 env
->IBAT
[1][nr
] = value
;
2269 env
->DBAT
[1][nr
] = value
;
2270 #if defined(FLUSH_ALL_TLBS)
2278 /*****************************************************************************/
2279 /* TLB management */
2280 void ppc_tlb_invalidate_all(CPUPPCState
*env
)
2282 switch (env
->mmu_model
) {
2283 case POWERPC_MMU_SOFT_6xx
:
2284 case POWERPC_MMU_SOFT_74xx
:
2285 ppc6xx_tlb_invalidate_all(env
);
2287 case POWERPC_MMU_SOFT_4xx
:
2288 case POWERPC_MMU_SOFT_4xx_Z
:
2289 ppc4xx_tlb_invalidate_all(env
);
2291 case POWERPC_MMU_REAL
:
2292 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2294 case POWERPC_MMU_MPC8xx
:
2296 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2298 case POWERPC_MMU_BOOKE
:
2301 case POWERPC_MMU_BOOKE206
:
2302 booke206_flush_tlb(env
, -1, 0);
2304 case POWERPC_MMU_32B
:
2305 case POWERPC_MMU_601
:
2306 #if defined(TARGET_PPC64)
2307 case POWERPC_MMU_64B
:
2308 case POWERPC_MMU_2_06
:
2309 case POWERPC_MMU_2_06d
:
2310 #endif /* defined(TARGET_PPC64) */
2315 cpu_abort(env
, "Unknown MMU model\n");
2320 void ppc_tlb_invalidate_one(CPUPPCState
*env
, target_ulong addr
)
2322 #if !defined(FLUSH_ALL_TLBS)
2323 addr
&= TARGET_PAGE_MASK
;
2324 switch (env
->mmu_model
) {
2325 case POWERPC_MMU_SOFT_6xx
:
2326 case POWERPC_MMU_SOFT_74xx
:
2327 ppc6xx_tlb_invalidate_virt(env
, addr
, 0);
2328 if (env
->id_tlbs
== 1) {
2329 ppc6xx_tlb_invalidate_virt(env
, addr
, 1);
2332 case POWERPC_MMU_SOFT_4xx
:
2333 case POWERPC_MMU_SOFT_4xx_Z
:
2334 ppc4xx_tlb_invalidate_virt(env
, addr
, env
->spr
[SPR_40x_PID
]);
2336 case POWERPC_MMU_REAL
:
2337 cpu_abort(env
, "No TLB for PowerPC 4xx in real mode\n");
2339 case POWERPC_MMU_MPC8xx
:
2341 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2343 case POWERPC_MMU_BOOKE
:
2345 cpu_abort(env
, "BookE MMU model is not implemented\n");
2347 case POWERPC_MMU_BOOKE206
:
2349 cpu_abort(env
, "BookE 2.06 MMU model is not implemented\n");
2351 case POWERPC_MMU_32B
:
2352 case POWERPC_MMU_601
:
2353 /* tlbie invalidate TLBs for all segments */
2354 addr
&= ~((target_ulong
)-1ULL << 28);
2355 /* XXX: this case should be optimized,
2356 * giving a mask to tlb_flush_page
2358 tlb_flush_page(env
, addr
| (0x0 << 28));
2359 tlb_flush_page(env
, addr
| (0x1 << 28));
2360 tlb_flush_page(env
, addr
| (0x2 << 28));
2361 tlb_flush_page(env
, addr
| (0x3 << 28));
2362 tlb_flush_page(env
, addr
| (0x4 << 28));
2363 tlb_flush_page(env
, addr
| (0x5 << 28));
2364 tlb_flush_page(env
, addr
| (0x6 << 28));
2365 tlb_flush_page(env
, addr
| (0x7 << 28));
2366 tlb_flush_page(env
, addr
| (0x8 << 28));
2367 tlb_flush_page(env
, addr
| (0x9 << 28));
2368 tlb_flush_page(env
, addr
| (0xA << 28));
2369 tlb_flush_page(env
, addr
| (0xB << 28));
2370 tlb_flush_page(env
, addr
| (0xC << 28));
2371 tlb_flush_page(env
, addr
| (0xD << 28));
2372 tlb_flush_page(env
, addr
| (0xE << 28));
2373 tlb_flush_page(env
, addr
| (0xF << 28));
2375 #if defined(TARGET_PPC64)
2376 case POWERPC_MMU_64B
:
2377 case POWERPC_MMU_2_06
:
2378 case POWERPC_MMU_2_06d
:
2379 /* tlbie invalidate TLBs for all segments */
2380 /* XXX: given the fact that there are too many segments to invalidate,
2381 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
2382 * we just invalidate all TLBs
2386 #endif /* defined(TARGET_PPC64) */
2389 cpu_abort(env
, "Unknown MMU model\n");
2393 ppc_tlb_invalidate_all(env
);
2397 /*****************************************************************************/
2398 /* Special registers manipulation */
2399 void ppc_store_sdr1(CPUPPCState
*env
, target_ulong value
)
2401 LOG_MMU("%s: " TARGET_FMT_lx
"\n", __func__
, value
);
2402 if (env
->spr
[SPR_SDR1
] != value
) {
2403 env
->spr
[SPR_SDR1
] = value
;
2404 #if defined(TARGET_PPC64)
2405 if (env
->mmu_model
& POWERPC_MMU_64
) {
2406 target_ulong htabsize
= value
& SDR_64_HTABSIZE
;
2408 if (htabsize
> 28) {
2409 fprintf(stderr
, "Invalid HTABSIZE 0x" TARGET_FMT_lx
2410 " stored in SDR1\n", htabsize
);
2413 env
->htab_mask
= (1ULL << (htabsize
+ 18)) - 1;
2414 env
->htab_base
= value
& SDR_64_HTABORG
;
2416 #endif /* defined(TARGET_PPC64) */
2418 /* FIXME: Should check for valid HTABMASK values */
2419 env
->htab_mask
= ((value
& SDR_32_HTABMASK
) << 16) | 0xFFFF;
2420 env
->htab_base
= value
& SDR_32_HTABORG
;
2426 /* Segment registers load and store */
2427 target_ulong
helper_load_sr(CPUPPCState
*env
, target_ulong sr_num
)
2429 #if defined(TARGET_PPC64)
2430 if (env
->mmu_model
& POWERPC_MMU_64
) {
2435 return env
->sr
[sr_num
];
2438 void helper_store_sr(CPUPPCState
*env
, target_ulong srnum
, target_ulong value
)
2440 LOG_MMU("%s: reg=%d " TARGET_FMT_lx
" " TARGET_FMT_lx
"\n", __func__
,
2441 (int)srnum
, value
, env
->sr
[srnum
]);
2442 #if defined(TARGET_PPC64)
2443 if (env
->mmu_model
& POWERPC_MMU_64
) {
2444 uint64_t rb
= 0, rs
= 0;
2447 rb
|= ((uint32_t)srnum
& 0xf) << 28;
2448 /* Set the valid bit */
2451 rb
|= (uint32_t)srnum
;
2454 rs
|= (value
& 0xfffffff) << 12;
2456 rs
|= ((value
>> 27) & 0xf) << 8;
2458 ppc_store_slb(env
, rb
, rs
);
2461 if (env
->sr
[srnum
] != value
) {
2462 env
->sr
[srnum
] = value
;
2463 /* Invalidating 256MB of virtual memory in 4kB pages is way longer than
2464 flusing the whole TLB. */
2465 #if !defined(FLUSH_ALL_TLBS) && 0
2467 target_ulong page
, end
;
2468 /* Invalidate 256 MB of virtual memory */
2469 page
= (16 << 20) * srnum
;
2470 end
= page
+ (16 << 20);
2471 for (; page
!= end
; page
+= TARGET_PAGE_SIZE
) {
2472 tlb_flush_page(env
, page
);
2480 #endif /* !defined(CONFIG_USER_ONLY) */
2482 #if !defined(CONFIG_USER_ONLY)
2483 /* SLB management */
2484 #if defined(TARGET_PPC64)
2485 void helper_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
2487 if (ppc_store_slb(env
, rb
, rs
) < 0) {
2488 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2489 POWERPC_EXCP_INVAL
);
2493 target_ulong
helper_load_slb_esid(CPUPPCState
*env
, target_ulong rb
)
2495 target_ulong rt
= 0;
2497 if (ppc_load_slb_esid(env
, rb
, &rt
) < 0) {
2498 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2499 POWERPC_EXCP_INVAL
);
2504 target_ulong
helper_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
)
2506 target_ulong rt
= 0;
2508 if (ppc_load_slb_vsid(env
, rb
, &rt
) < 0) {
2509 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
2510 POWERPC_EXCP_INVAL
);
2514 #endif /* defined(TARGET_PPC64) */
2516 /* TLB management */
2517 void helper_tlbia(CPUPPCState
*env
)
2519 ppc_tlb_invalidate_all(env
);
2522 void helper_tlbie(CPUPPCState
*env
, target_ulong addr
)
2524 ppc_tlb_invalidate_one(env
, addr
);
2527 /* Software driven TLBs management */
2528 /* PowerPC 602/603 software TLB load instructions helpers */
2529 static void do_6xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2531 target_ulong RPN
, CMP
, EPN
;
2534 RPN
= env
->spr
[SPR_RPA
];
2536 CMP
= env
->spr
[SPR_ICMP
];
2537 EPN
= env
->spr
[SPR_IMISS
];
2539 CMP
= env
->spr
[SPR_DCMP
];
2540 EPN
= env
->spr
[SPR_DMISS
];
2542 way
= (env
->spr
[SPR_SRR1
] >> 17) & 1;
2543 (void)EPN
; /* avoid a compiler warning */
2544 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2545 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2547 /* Store this TLB */
2548 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2549 way
, is_code
, CMP
, RPN
);
2552 void helper_6xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2554 do_6xx_tlb(env
, EPN
, 0);
2557 void helper_6xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2559 do_6xx_tlb(env
, EPN
, 1);
2562 /* PowerPC 74xx software TLB load instructions helpers */
2563 static void do_74xx_tlb(CPUPPCState
*env
, target_ulong new_EPN
, int is_code
)
2565 target_ulong RPN
, CMP
, EPN
;
2568 RPN
= env
->spr
[SPR_PTELO
];
2569 CMP
= env
->spr
[SPR_PTEHI
];
2570 EPN
= env
->spr
[SPR_TLBMISS
] & ~0x3;
2571 way
= env
->spr
[SPR_TLBMISS
] & 0x3;
2572 (void)EPN
; /* avoid a compiler warning */
2573 LOG_SWTLB("%s: EPN " TARGET_FMT_lx
" " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
2574 " PTE1 " TARGET_FMT_lx
" way %d\n", __func__
, new_EPN
, EPN
, CMP
,
2576 /* Store this TLB */
2577 ppc6xx_tlb_store(env
, (uint32_t)(new_EPN
& TARGET_PAGE_MASK
),
2578 way
, is_code
, CMP
, RPN
);
2581 void helper_74xx_tlbd(CPUPPCState
*env
, target_ulong EPN
)
2583 do_74xx_tlb(env
, EPN
, 0);
2586 void helper_74xx_tlbi(CPUPPCState
*env
, target_ulong EPN
)
2588 do_74xx_tlb(env
, EPN
, 1);
2591 /*****************************************************************************/
2592 /* PowerPC 601 specific instructions (POWER bridge) */
2594 target_ulong
helper_rac(CPUPPCState
*env
, target_ulong addr
)
2598 target_ulong ret
= 0;
2600 /* We don't have to generate many instances of this instruction,
2601 * as rac is supervisor only.
2603 /* XXX: FIX THIS: Pretend we have no BAT */
2604 nb_BATs
= env
->nb_BATs
;
2606 if (get_physical_address(env
, &ctx
, addr
, 0, ACCESS_INT
) == 0) {
2609 env
->nb_BATs
= nb_BATs
;
2613 static inline target_ulong
booke_tlb_to_page_size(int size
)
2615 return 1024 << (2 * size
);
2618 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2622 switch (page_size
) {
2656 #if defined(TARGET_PPC64)
2657 case 0x000100000000ULL
:
2660 case 0x000400000000ULL
:
2663 case 0x001000000000ULL
:
2666 case 0x004000000000ULL
:
2669 case 0x010000000000ULL
:
/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

/* TLBHI word layout: valid bit, endianness bit and 3-bit size field */
#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

/* TLBLO word layout: execute/write permissions, attributes and RPN */
#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00
2697 target_ulong
helper_4xx_tlbre_hi(CPUPPCState
*env
, target_ulong entry
)
2703 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2704 tlb
= &env
->tlb
.tlbe
[entry
];
2706 if (tlb
->prot
& PAGE_VALID
) {
2707 ret
|= PPC4XX_TLBHI_V
;
2709 size
= booke_page_size_to_tlb(tlb
->size
);
2710 if (size
< PPC4XX_TLBHI_SIZE_MIN
|| size
> PPC4XX_TLBHI_SIZE_MAX
) {
2711 size
= PPC4XX_TLBHI_SIZE_DEFAULT
;
2713 ret
|= size
<< PPC4XX_TLBHI_SIZE_SHIFT
;
2714 env
->spr
[SPR_40x_PID
] = tlb
->PID
;
2718 target_ulong
helper_4xx_tlbre_lo(CPUPPCState
*env
, target_ulong entry
)
2723 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2724 tlb
= &env
->tlb
.tlbe
[entry
];
2726 if (tlb
->prot
& PAGE_EXEC
) {
2727 ret
|= PPC4XX_TLBLO_EX
;
2729 if (tlb
->prot
& PAGE_WRITE
) {
2730 ret
|= PPC4XX_TLBLO_WR
;
2735 void helper_4xx_tlbwe_hi(CPUPPCState
*env
, target_ulong entry
,
2739 target_ulong page
, end
;
2741 LOG_SWTLB("%s entry %d val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2743 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2744 tlb
= &env
->tlb
.tlbe
[entry
];
2745 /* Invalidate previous TLB (if it's valid) */
2746 if (tlb
->prot
& PAGE_VALID
) {
2747 end
= tlb
->EPN
+ tlb
->size
;
2748 LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx
" end "
2749 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2750 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2751 tlb_flush_page(env
, page
);
2754 tlb
->size
= booke_tlb_to_page_size((val
>> PPC4XX_TLBHI_SIZE_SHIFT
)
2755 & PPC4XX_TLBHI_SIZE_MASK
);
2756 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2757 * If this ever occurs, one should use the ppcemb target instead
2758 * of the ppc or ppc64 one
2760 if ((val
& PPC4XX_TLBHI_V
) && tlb
->size
< TARGET_PAGE_SIZE
) {
2761 cpu_abort(env
, "TLB size " TARGET_FMT_lu
" < %u "
2762 "are not supported (%d)\n",
2763 tlb
->size
, TARGET_PAGE_SIZE
, (int)((val
>> 7) & 0x7));
2765 tlb
->EPN
= val
& ~(tlb
->size
- 1);
2766 if (val
& PPC4XX_TLBHI_V
) {
2767 tlb
->prot
|= PAGE_VALID
;
2768 if (val
& PPC4XX_TLBHI_E
) {
2769 /* XXX: TO BE FIXED */
2771 "Little-endian TLB entries are not supported by now\n");
2774 tlb
->prot
&= ~PAGE_VALID
;
2776 tlb
->PID
= env
->spr
[SPR_40x_PID
]; /* PID */
2777 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2778 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2779 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2780 tlb
->prot
& PAGE_READ
? 'r' : '-',
2781 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2782 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2783 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2784 /* Invalidate new TLB (if valid) */
2785 if (tlb
->prot
& PAGE_VALID
) {
2786 end
= tlb
->EPN
+ tlb
->size
;
2787 LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx
" end "
2788 TARGET_FMT_lx
"\n", __func__
, (int)entry
, tlb
->EPN
, end
);
2789 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
2790 tlb_flush_page(env
, page
);
2795 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2800 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2802 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2803 tlb
= &env
->tlb
.tlbe
[entry
];
2804 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2805 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2806 tlb
->prot
= PAGE_READ
;
2807 if (val
& PPC4XX_TLBLO_EX
) {
2808 tlb
->prot
|= PAGE_EXEC
;
2810 if (val
& PPC4XX_TLBLO_WR
) {
2811 tlb
->prot
|= PAGE_WRITE
;
2813 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2814 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2815 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2816 tlb
->prot
& PAGE_READ
? 'r' : '-',
2817 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2818 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2819 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2822 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2824 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
/* PowerPC 440 TLB management */
/* Write one word (0, 1 or 2) of a PPC 440 TLB entry.
 * Word 0 carries EPN/size/valid, word 1 the RPN, word 2 the storage
 * attributes plus the permission bits.  The QEMU TLB is flushed whenever
 * a change could invalidate previously installed translations.
 * NOTE(review): reconstructed from a mangled extraction; the switch
 * skeleton, else-branches and flush calls were restored — confirm
 * against upstream before relying on exact flush behaviour.
 */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

    LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
              __func__, word, (int)entry, value);
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
            do_flush_tlbs = 1;
        }
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
            do_flush_tlbs = 1;
        }
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; /* PID */
        if (do_flush_tlbs) {
            tlb_flush(env, 1);
        }
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
            tlb_flush(env, 1);
        }
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Keep only the VALID bit, then rebuild permissions from word 2.
         * The upper nibble (<< 4) holds the second privilege level's
         * permissions — TODO confirm user/supervisor mapping upstream. */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}
/* Read back one word (0, 1 or 2) of a PPC 440 TLB entry, re-encoding the
 * internal representation into the guest-visible register layout.  Reading
 * word 0 also deposits the entry's PID into the low byte of MMUCR.
 * NOTE(review): reconstructed from a mangled extraction; the switch/case
 * skeleton, `ret` accumulation lines and final return were restored —
 * confirm against upstream.
 */
target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}
2957 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2959 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
/* PowerPC BookE 2.06 TLB management */
/* Resolve the TLB entry currently addressed by the MAS registers:
 * TLB array from MAS0[TLBSEL], way from MAS0[ESEL], effective address
 * from MAS2[EPN].  Hardware entry selection (HES) is not implemented.
 * NOTE(review): reconstructed from a mangled extraction; dropped
 * declaration/brace lines restored — confirm against upstream.
 */
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env, "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}
/* Write a BookE PID register.  A PID change switches the effective
 * address space, so the cached QEMU TLB must be flushed.
 * NOTE(review): the flush call after the comment was dropped by the
 * extraction and restored here — confirm against upstream. */
void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs mean we're in a different address space now */
    tlb_flush(env, 1);
}
/* BookE 2.06 tlbwe: commit the MAS registers into the selected TLB entry.
 * Validates the write-qualifier and the requested page size, forces
 * fixed-size (!AVAIL) arrays to their configured page size, masks invalid
 * EPN bits, and flushes the affected translations from the QEMU TLB.
 * NOTE(review): reconstructed from a mangled extraction; the switch
 * skeleton, guard returns, msr_gs/msr_cm checks and else-branches were
 * restored — confirm against upstream.
 */
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !msr_gs) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL |
                                   POWERPC_EXCP_INVAL_INVAL);
    }

    if (msr_gs) {
        cpu_abort(env, "missing HV implementation\n");
    }
    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if (!(tlbncfg & TLBnCFG_AVAIL)) {
        /* force !AVAIL TLB entries to correct page size */
        tlb->mas1 &= ~MAS1_TSIZE_MASK;
        /* XXX can be configured in MMUCSR0 */
        tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!msr_cm) {
        /* Executing a tlbwe instruction in 32-bit mode will set
         * bits 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env, tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env, 1);
    }
}
3083 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
3085 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
3086 int way
= booke206_tlbm_to_way(env
, tlb
);
3088 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
3089 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
3090 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3092 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
3093 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
3094 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
3095 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
3098 void helper_booke206_tlbre(CPUPPCState
*env
)
3100 ppcmas_tlb_t
*tlb
= NULL
;
3102 tlb
= booke206_cur_tlb(env
);
3104 env
->spr
[SPR_BOOKE_MAS1
] = 0;
3106 booke206_tlb_to_mas(env
, tlb
);
/* BookE 2.06 tlbsx: search every TLB array/way for an entry matching the
 * effective address, the search PID (MAS6[SPID]) and the address space
 * (MAS6[SAS]).  On a hit the entry is mirrored into the MAS registers;
 * on a miss the MAS registers are loaded with the MAS4 defaults and the
 * round-robin next-victim way is advanced.
 * NOTE(review): reconstructed from a mangled extraction; local
 * declarations, `continue` guards and the last_way increment were
 * restored — confirm against upstream (including the raddr temp's type).
 */
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
/* Invalidate every non-IPROT entry of TLB array `tlbn` whose EPN matches
 * the effective address `ea` (compared under the entry's own page-size
 * mask).  Entries protected by MAS1[IPROT] are left untouched.
 * NOTE(review): reconstructed from a mangled extraction; the loop
 * skeleton and NULL guard were restored — confirm against upstream. */
static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              uint32_t ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}
3184 void helper_booke206_tlbivax(CPUPPCState
*env
, target_ulong address
)
3186 if (address
& 0x4) {
3187 /* flush all entries */
3188 if (address
& 0x8) {
3189 /* flush all of TLB1 */
3190 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB1
, 1);
3192 /* flush all of TLB0 */
3193 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB0
, 0);
3198 if (address
& 0x8) {
3199 /* flush TLB1 entries */
3200 booke206_invalidate_ea_tlb(env
, 1, address
);
3203 /* flush TLB0 entries */
3204 booke206_invalidate_ea_tlb(env
, 0, address
);
3205 tlb_flush_page(env
, address
& MAS2_EPN_MASK
);
3209 void helper_booke206_tlbilx0(CPUPPCState
*env
, target_ulong address
)
3211 /* XXX missing LPID handling */
3212 booke206_flush_tlb(env
, -1, 1);
3215 void helper_booke206_tlbilx1(CPUPPCState
*env
, target_ulong address
)
3218 int tid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID
);
3219 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
3222 /* XXX missing LPID handling */
3223 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3224 tlb_size
= booke206_tlb_size(env
, i
);
3225 for (j
= 0; j
< tlb_size
; j
++) {
3226 if (!(tlb
[j
].mas1
& MAS1_IPROT
) &&
3227 ((tlb
[j
].mas1
& MAS1_TID_MASK
) == tid
)) {
3228 tlb
[j
].mas1
&= ~MAS1_VALID
;
3231 tlb
+= booke206_tlb_size(env
, i
);
/* tlbilx type 3: invalidate the non-IPROT TLB entries that match the
 * given effective address, the PID from MAS6[SPID], the IND bit from
 * MAS6[SIND] and the guest-space bit from MAS5[SGS]; then flush the
 * QEMU TLB.
 * NOTE(review): reconstructed from a mangled extraction; declarations,
 * `continue` guards and the trailing flush were restored — confirm
 * against upstream (MAV2 handling is stubbed out as in the original). */
void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX check for unsupported isize and raise an invalid opcode then */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env, 1);
}
3276 void helper_booke206_tlbflush(CPUPPCState
*env
, uint32_t type
)
3281 flags
|= BOOKE206_FLUSH_TLB1
;
3285 flags
|= BOOKE206_FLUSH_TLB0
;
3288 booke206_flush_tlb(env
, flags
, 1);