2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
4 * Copyright (c) 2003-2007 Jocelyn Mayer
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 //#define DEBUG_SOFTWARE_TLB
28 //#define DUMP_PAGE_TABLES
29 //#define DEBUG_SOFTWARE_TLB
30 //#define FLUSH_ALL_TLBS
33 # define LOG_MMU(...) qemu_log(__VA_ARGS__)
34 # define LOG_MMU_STATE(env) log_cpu_state((env), 0)
36 # define LOG_MMU(...) do { } while (0)
37 # define LOG_MMU_STATE(...) do { } while (0)
40 #ifdef DEBUG_SOFTWARE_TLB
41 # define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
43 # define LOG_SWTLB(...) do { } while (0)
47 # define LOG_BATS(...) qemu_log(__VA_ARGS__)
49 # define LOG_BATS(...) do { } while (0)
53 # define LOG_SLB(...) qemu_log(__VA_ARGS__)
55 # define LOG_SLB(...) do { } while (0)
58 /*****************************************************************************/
59 /* PowerPC MMU emulation */
60 #if defined(CONFIG_USER_ONLY)
61 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
64 int exception
, error_code
;
67 exception
= POWERPC_EXCP_ISI
;
68 error_code
= 0x40000000;
70 exception
= POWERPC_EXCP_DSI
;
71 error_code
= 0x40000000;
73 error_code
|= 0x02000000;
75 env
->spr
[SPR_DAR
] = address
;
76 env
->spr
[SPR_DSISR
] = error_code
;
78 env
->exception_index
= exception
;
79 env
->error_code
= error_code
;
85 /* Common routines used by software and hardware TLBs emulation */
86 static inline int pte_is_valid(target_ulong pte0
)
88 return pte0
& 0x80000000 ? 1 : 0;
91 static inline void pte_invalidate(target_ulong
*pte0
)
96 #if defined(TARGET_PPC64)
97 static inline int pte64_is_valid(target_ulong pte0
)
99 return pte0
& 0x0000000000000001ULL
? 1 : 0;
102 static inline void pte64_invalidate(target_ulong
*pte0
)
104 *pte0
&= ~0x0000000000000001ULL
;
108 #define PTE_PTEM_MASK 0x7FFFFFBF
109 #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
110 #if defined(TARGET_PPC64)
111 #define PTE64_PTEM_MASK 0xFFFFFFFFFFFFFF80ULL
112 #define PTE64_CHECK_MASK (TARGET_PAGE_MASK | 0x7F)
115 static inline int pp_check(int key
, int pp
, int nx
)
119 /* Compute access rights */
120 /* When pp is 3/7, the result is undefined. Set it to noaccess */
127 access
|= PAGE_WRITE
;
145 access
= PAGE_READ
| PAGE_WRITE
;
156 static inline int check_prot(int prot
, int rw
, int access_type
)
160 if (access_type
== ACCESS_CODE
) {
161 if (prot
& PAGE_EXEC
) {
167 if (prot
& PAGE_WRITE
) {
173 if (prot
& PAGE_READ
) {
183 static inline int pte_check(mmu_ctx_t
*ctx
, int is_64b
, target_ulong pte0
,
184 target_ulong pte1
, int h
, int rw
, int type
)
186 target_ulong ptem
, mmask
;
187 int access
, ret
, pteh
, ptev
, pp
;
190 /* Check validity and table match */
191 #if defined(TARGET_PPC64)
193 ptev
= pte64_is_valid(pte0
);
194 pteh
= (pte0
>> 1) & 1;
198 ptev
= pte_is_valid(pte0
);
199 pteh
= (pte0
>> 6) & 1;
201 if (ptev
&& h
== pteh
) {
202 /* Check vsid & api */
203 #if defined(TARGET_PPC64)
205 ptem
= pte0
& PTE64_PTEM_MASK
;
206 mmask
= PTE64_CHECK_MASK
;
207 pp
= (pte1
& 0x00000003) | ((pte1
>> 61) & 0x00000004);
208 ctx
->nx
= (pte1
>> 2) & 1; /* No execute bit */
209 ctx
->nx
|= (pte1
>> 3) & 1; /* Guarded bit */
213 ptem
= pte0
& PTE_PTEM_MASK
;
214 mmask
= PTE_CHECK_MASK
;
215 pp
= pte1
& 0x00000003;
217 if (ptem
== ctx
->ptem
) {
218 if (ctx
->raddr
!= (target_phys_addr_t
)-1ULL) {
219 /* all matches should have equal RPN, WIMG & PP */
220 if ((ctx
->raddr
& mmask
) != (pte1
& mmask
)) {
221 qemu_log("Bad RPN/WIMG/PP\n");
225 /* Compute access rights */
226 access
= pp_check(ctx
->key
, pp
, ctx
->nx
);
227 /* Keep the matching PTE informations */
230 ret
= check_prot(ctx
->prot
, rw
, type
);
233 LOG_MMU("PTE access granted !\n");
235 /* Access right violation */
236 LOG_MMU("PTE access rejected\n");
244 static inline int pte32_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
245 target_ulong pte1
, int h
, int rw
, int type
)
247 return pte_check(ctx
, 0, pte0
, pte1
, h
, rw
, type
);
250 #if defined(TARGET_PPC64)
251 static inline int pte64_check(mmu_ctx_t
*ctx
, target_ulong pte0
,
252 target_ulong pte1
, int h
, int rw
, int type
)
254 return pte_check(ctx
, 1, pte0
, pte1
, h
, rw
, type
);
258 static inline int pte_update_flags(mmu_ctx_t
*ctx
, target_ulong
*pte1p
,
263 /* Update page flags */
264 if (!(*pte1p
& 0x00000100)) {
265 /* Update accessed flag */
266 *pte1p
|= 0x00000100;
269 if (!(*pte1p
& 0x00000080)) {
270 if (rw
== 1 && ret
== 0) {
271 /* Update changed flag */
272 *pte1p
|= 0x00000080;
275 /* Force page fault for first write access */
276 ctx
->prot
&= ~PAGE_WRITE
;
283 /* Software driven TLB helpers */
284 static inline int ppc6xx_tlb_getnum(CPUPPCState
*env
, target_ulong eaddr
,
285 int way
, int is_code
)
289 /* Select TLB num in a way from address */
290 nr
= (eaddr
>> TARGET_PAGE_BITS
) & (env
->tlb_per_way
- 1);
292 nr
+= env
->tlb_per_way
* way
;
293 /* 6xx have separate TLBs for instructions and data */
294 if (is_code
&& env
->id_tlbs
== 1) {
301 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState
*env
)
306 /* LOG_SWTLB("Invalidate all TLBs\n"); */
307 /* Invalidate all defined software TLB */
309 if (env
->id_tlbs
== 1) {
312 for (nr
= 0; nr
< max
; nr
++) {
313 tlb
= &env
->tlb
.tlb6
[nr
];
314 pte_invalidate(&tlb
->pte0
);
319 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState
*env
,
321 int is_code
, int match_epn
)
323 #if !defined(FLUSH_ALL_TLBS)
327 /* Invalidate ITLB + DTLB, all ways */
328 for (way
= 0; way
< env
->nb_ways
; way
++) {
329 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
, is_code
);
330 tlb
= &env
->tlb
.tlb6
[nr
];
331 if (pte_is_valid(tlb
->pte0
) && (match_epn
== 0 || eaddr
== tlb
->EPN
)) {
332 LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx
"\n", nr
,
334 pte_invalidate(&tlb
->pte0
);
335 tlb_flush_page(env
, tlb
->EPN
);
339 /* XXX: PowerPC specification say this is valid as well */
340 ppc6xx_tlb_invalidate_all(env
);
344 static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState
*env
,
345 target_ulong eaddr
, int is_code
)
347 ppc6xx_tlb_invalidate_virt2(env
, eaddr
, is_code
, 0);
350 static void ppc6xx_tlb_store(CPUPPCState
*env
, target_ulong EPN
, int way
,
351 int is_code
, target_ulong pte0
, target_ulong pte1
)
356 nr
= ppc6xx_tlb_getnum(env
, EPN
, way
, is_code
);
357 tlb
= &env
->tlb
.tlb6
[nr
];
358 LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx
" PTE0 " TARGET_FMT_lx
359 " PTE1 " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
, EPN
, pte0
, pte1
);
360 /* Invalidate any pending reference in QEMU for this virtual address */
361 ppc6xx_tlb_invalidate_virt2(env
, EPN
, is_code
, 1);
365 /* Store last way for LRU mechanism */
369 static inline int ppc6xx_tlb_check(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
370 target_ulong eaddr
, int rw
, int access_type
)
377 ret
= -1; /* No TLB found */
378 for (way
= 0; way
< env
->nb_ways
; way
++) {
379 nr
= ppc6xx_tlb_getnum(env
, eaddr
, way
,
380 access_type
== ACCESS_CODE
? 1 : 0);
381 tlb
= &env
->tlb
.tlb6
[nr
];
382 /* This test "emulates" the PTE index match for hardware TLBs */
383 if ((eaddr
& TARGET_PAGE_MASK
) != tlb
->EPN
) {
384 LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx
" " TARGET_FMT_lx
385 "] <> " TARGET_FMT_lx
"\n", nr
, env
->nb_tlb
,
386 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
387 tlb
->EPN
, tlb
->EPN
+ TARGET_PAGE_SIZE
, eaddr
);
390 LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx
" <> " TARGET_FMT_lx
" "
391 TARGET_FMT_lx
" %c %c\n", nr
, env
->nb_tlb
,
392 pte_is_valid(tlb
->pte0
) ? "valid" : "inval",
393 tlb
->EPN
, eaddr
, tlb
->pte1
,
394 rw
? 'S' : 'L', access_type
== ACCESS_CODE
? 'I' : 'D');
395 switch (pte32_check(ctx
, tlb
->pte0
, tlb
->pte1
, 0, rw
, access_type
)) {
397 /* TLB inconsistency */
400 /* Access violation */
410 /* XXX: we should go on looping to check all TLBs consistency
411 * but we can speed-up the whole thing as the
412 * result would be undefined if TLBs are not consistent.
421 LOG_SWTLB("found TLB at addr " TARGET_FMT_plx
" prot=%01x ret=%d\n",
422 ctx
->raddr
& TARGET_PAGE_MASK
, ctx
->prot
, ret
);
423 /* Update page flags */
424 pte_update_flags(ctx
, &env
->tlb
.tlb6
[best
].pte1
, ret
, rw
);
430 /* Perform BAT hit & translation */
431 static inline void bat_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
432 int *validp
, int *protp
, target_ulong
*BATu
,
438 bl
= (*BATu
& 0x00001FFC) << 15;
441 if (((msr_pr
== 0) && (*BATu
& 0x00000002)) ||
442 ((msr_pr
!= 0) && (*BATu
& 0x00000001))) {
444 pp
= *BATl
& 0x00000003;
446 prot
= PAGE_READ
| PAGE_EXEC
;
457 static inline void bat_601_size_prot(CPUPPCState
*env
, target_ulong
*blp
,
458 int *validp
, int *protp
,
459 target_ulong
*BATu
, target_ulong
*BATl
)
462 int key
, pp
, valid
, prot
;
464 bl
= (*BATl
& 0x0000003F) << 17;
465 LOG_BATS("b %02x ==> bl " TARGET_FMT_lx
" msk " TARGET_FMT_lx
"\n",
466 (uint8_t)(*BATl
& 0x0000003F), bl
, ~bl
);
468 valid
= (*BATl
>> 6) & 1;
470 pp
= *BATu
& 0x00000003;
472 key
= (*BATu
>> 3) & 1;
474 key
= (*BATu
>> 2) & 1;
476 prot
= pp_check(key
, pp
, 0);
483 static inline int get_bat(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
484 target_ulong
virtual, int rw
, int type
)
486 target_ulong
*BATlt
, *BATut
, *BATu
, *BATl
;
487 target_ulong BEPIl
, BEPIu
, bl
;
491 LOG_BATS("%s: %cBAT v " TARGET_FMT_lx
"\n", __func__
,
492 type
== ACCESS_CODE
? 'I' : 'D', virtual);
495 BATlt
= env
->IBAT
[1];
496 BATut
= env
->IBAT
[0];
499 BATlt
= env
->DBAT
[1];
500 BATut
= env
->DBAT
[0];
503 for (i
= 0; i
< env
->nb_BATs
; i
++) {
506 BEPIu
= *BATu
& 0xF0000000;
507 BEPIl
= *BATu
& 0x0FFE0000;
508 if (unlikely(env
->mmu_model
== POWERPC_MMU_601
)) {
509 bat_601_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
511 bat_size_prot(env
, &bl
, &valid
, &prot
, BATu
, BATl
);
513 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
514 " BATl " TARGET_FMT_lx
"\n", __func__
,
515 type
== ACCESS_CODE
? 'I' : 'D', i
, virtual, *BATu
, *BATl
);
516 if ((virtual & 0xF0000000) == BEPIu
&&
517 ((virtual & 0x0FFE0000) & ~bl
) == BEPIl
) {
520 /* Get physical address */
521 ctx
->raddr
= (*BATl
& 0xF0000000) |
522 ((virtual & 0x0FFE0000 & bl
) | (*BATl
& 0x0FFE0000)) |
523 (virtual & 0x0001F000);
524 /* Compute access rights */
526 ret
= check_prot(ctx
->prot
, rw
, type
);
528 LOG_BATS("BAT %d match: r " TARGET_FMT_plx
" prot=%c%c\n",
529 i
, ctx
->raddr
, ctx
->prot
& PAGE_READ
? 'R' : '-',
530 ctx
->prot
& PAGE_WRITE
? 'W' : '-');
537 #if defined(DEBUG_BATS)
538 if (qemu_log_enabled()) {
539 LOG_BATS("no BAT match for " TARGET_FMT_lx
":\n", virtual);
540 for (i
= 0; i
< 4; i
++) {
543 BEPIu
= *BATu
& 0xF0000000;
544 BEPIl
= *BATu
& 0x0FFE0000;
545 bl
= (*BATu
& 0x00001FFC) << 15;
546 LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx
" BATu " TARGET_FMT_lx
547 " BATl " TARGET_FMT_lx
"\n\t" TARGET_FMT_lx
" "
548 TARGET_FMT_lx
" " TARGET_FMT_lx
"\n",
549 __func__
, type
== ACCESS_CODE
? 'I' : 'D', i
, virtual,
550 *BATu
, *BATl
, BEPIu
, BEPIl
, bl
);
559 static inline target_phys_addr_t
get_pteg_offset(CPUPPCState
*env
,
560 target_phys_addr_t hash
,
563 return (hash
* pte_size
* 8) & env
->htab_mask
;
566 /* PTE table lookup */
567 static inline int find_pte2(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int is_64b
, int h
,
568 int rw
, int type
, int target_page_bits
)
570 target_phys_addr_t pteg_off
;
571 target_ulong pte0
, pte1
;
575 ret
= -1; /* No entry found */
576 pteg_off
= get_pteg_offset(env
, ctx
->hash
[h
],
577 is_64b
? HASH_PTE_SIZE_64
: HASH_PTE_SIZE_32
);
578 for (i
= 0; i
< 8; i
++) {
579 #if defined(TARGET_PPC64)
581 if (env
->external_htab
) {
582 pte0
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16));
583 pte1
= ldq_p(env
->external_htab
+ pteg_off
+ (i
* 16) + 8);
585 pte0
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16));
586 pte1
= ldq_phys(env
->htab_base
+ pteg_off
+ (i
* 16) + 8);
589 r
= pte64_check(ctx
, pte0
, pte1
, h
, rw
, type
);
590 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
591 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
592 pteg_off
+ (i
* 16), pte0
, pte1
, (int)(pte0
& 1), h
,
593 (int)((pte0
>> 1) & 1), ctx
->ptem
);
597 if (env
->external_htab
) {
598 pte0
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8));
599 pte1
= ldl_p(env
->external_htab
+ pteg_off
+ (i
* 8) + 4);
601 pte0
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8));
602 pte1
= ldl_phys(env
->htab_base
+ pteg_off
+ (i
* 8) + 4);
604 r
= pte32_check(ctx
, pte0
, pte1
, h
, rw
, type
);
605 LOG_MMU("Load pte from " TARGET_FMT_lx
" => " TARGET_FMT_lx
" "
606 TARGET_FMT_lx
" %d %d %d " TARGET_FMT_lx
"\n",
607 pteg_off
+ (i
* 8), pte0
, pte1
, (int)(pte0
>> 31), h
,
608 (int)((pte0
>> 6) & 1), ctx
->ptem
);
612 /* PTE inconsistency */
615 /* Access violation */
625 /* XXX: we should go on looping to check all PTEs consistency
626 * but if we can speed-up the whole thing as the
627 * result would be undefined if PTEs are not consistent.
636 LOG_MMU("found PTE at addr " TARGET_FMT_lx
" prot=%01x ret=%d\n",
637 ctx
->raddr
, ctx
->prot
, ret
);
638 /* Update page flags */
640 if (pte_update_flags(ctx
, &pte1
, ret
, rw
) == 1) {
641 #if defined(TARGET_PPC64)
643 if (env
->external_htab
) {
644 stq_p(env
->external_htab
+ pteg_off
+ (good
* 16) + 8,
647 stq_phys_notdirty(env
->htab_base
+ pteg_off
+
648 (good
* 16) + 8, pte1
);
653 if (env
->external_htab
) {
654 stl_p(env
->external_htab
+ pteg_off
+ (good
* 8) + 4,
657 stl_phys_notdirty(env
->htab_base
+ pteg_off
+
658 (good
* 8) + 4, pte1
);
664 /* We have a TLB that saves 4K pages, so let's
665 * split a huge page to 4k chunks */
666 if (target_page_bits
!= TARGET_PAGE_BITS
) {
667 ctx
->raddr
|= (ctx
->eaddr
& ((1 << target_page_bits
) - 1))
673 static inline int find_pte(CPUPPCState
*env
, mmu_ctx_t
*ctx
, int h
, int rw
,
674 int type
, int target_page_bits
)
676 #if defined(TARGET_PPC64)
677 if (env
->mmu_model
& POWERPC_MMU_64
) {
678 return find_pte2(env
, ctx
, 1, h
, rw
, type
, target_page_bits
);
682 return find_pte2(env
, ctx
, 0, h
, rw
, type
, target_page_bits
);
685 #if defined(TARGET_PPC64)
686 static inline ppc_slb_t
*slb_lookup(CPUPPCState
*env
, target_ulong eaddr
)
688 uint64_t esid_256M
, esid_1T
;
691 LOG_SLB("%s: eaddr " TARGET_FMT_lx
"\n", __func__
, eaddr
);
693 esid_256M
= (eaddr
& SEGMENT_MASK_256M
) | SLB_ESID_V
;
694 esid_1T
= (eaddr
& SEGMENT_MASK_1T
) | SLB_ESID_V
;
696 for (n
= 0; n
< env
->slb_nr
; n
++) {
697 ppc_slb_t
*slb
= &env
->slb
[n
];
699 LOG_SLB("%s: slot %d %016" PRIx64
" %016"
700 PRIx64
"\n", __func__
, n
, slb
->esid
, slb
->vsid
);
701 /* We check for 1T matches on all MMUs here - if the MMU
702 * doesn't have 1T segment support, we will have prevented 1T
703 * entries from being inserted in the slbmte code. */
704 if (((slb
->esid
== esid_256M
) &&
705 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_256M
))
706 || ((slb
->esid
== esid_1T
) &&
707 ((slb
->vsid
& SLB_VSID_B
) == SLB_VSID_B_1T
))) {
715 /*****************************************************************************/
718 void helper_slbia(CPUPPCState
*env
)
720 int n
, do_invalidate
;
723 /* XXX: Warning: slbia never invalidates the first segment */
724 for (n
= 1; n
< env
->slb_nr
; n
++) {
725 ppc_slb_t
*slb
= &env
->slb
[n
];
727 if (slb
->esid
& SLB_ESID_V
) {
728 slb
->esid
&= ~SLB_ESID_V
;
729 /* XXX: given the fact that segment size is 256 MB or 1TB,
730 * and we still don't have a tlb_flush_mask(env, n, mask)
731 * in QEMU, we just invalidate all TLBs
741 void helper_slbie(CPUPPCState
*env
, target_ulong addr
)
745 slb
= slb_lookup(env
, addr
);
750 if (slb
->esid
& SLB_ESID_V
) {
751 slb
->esid
&= ~SLB_ESID_V
;
753 /* XXX: given the fact that segment size is 256 MB or 1TB,
754 * and we still don't have a tlb_flush_mask(env, n, mask)
755 * in QEMU, we just invalidate all TLBs
761 int ppc_store_slb(CPUPPCState
*env
, target_ulong rb
, target_ulong rs
)
763 int slot
= rb
& 0xfff;
764 ppc_slb_t
*slb
= &env
->slb
[slot
];
766 if (rb
& (0x1000 - env
->slb_nr
)) {
767 return -1; /* Reserved bits set or slot too high */
769 if (rs
& (SLB_VSID_B
& ~SLB_VSID_B_1T
)) {
770 return -1; /* Bad segment size */
772 if ((rs
& SLB_VSID_B
) && !(env
->mmu_model
& POWERPC_MMU_1TSEG
)) {
773 return -1; /* 1T segment on MMU that doesn't support it */
776 /* Mask out the slot number as we store the entry */
777 slb
->esid
= rb
& (SLB_ESID_ESID
| SLB_ESID_V
);
780 LOG_SLB("%s: %d " TARGET_FMT_lx
" - " TARGET_FMT_lx
" => %016" PRIx64
781 " %016" PRIx64
"\n", __func__
, slot
, rb
, rs
,
782 slb
->esid
, slb
->vsid
);
787 static int ppc_load_slb_esid(CPUPPCState
*env
, target_ulong rb
,
790 int slot
= rb
& 0xfff;
791 ppc_slb_t
*slb
= &env
->slb
[slot
];
793 if (slot
>= env
->slb_nr
) {
801 static int ppc_load_slb_vsid(CPUPPCState
*env
, target_ulong rb
,
804 int slot
= rb
& 0xfff;
805 ppc_slb_t
*slb
= &env
->slb
[slot
];
807 if (slot
>= env
->slb_nr
) {
814 #endif /* defined(TARGET_PPC64) */
816 /* Perform segment based translation */
817 static inline int get_segment(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
818 target_ulong eaddr
, int rw
, int type
)
820 target_phys_addr_t hash
;
822 int ds
, pr
, target_page_bits
;
827 #if defined(TARGET_PPC64)
828 if (env
->mmu_model
& POWERPC_MMU_64
) {
830 target_ulong pageaddr
;
833 LOG_MMU("Check SLBs\n");
834 slb
= slb_lookup(env
, eaddr
);
839 if (slb
->vsid
& SLB_VSID_B
) {
840 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT_1T
;
843 vsid
= (slb
->vsid
& SLB_VSID_VSID
) >> SLB_VSID_SHIFT
;
847 target_page_bits
= (slb
->vsid
& SLB_VSID_L
)
848 ? TARGET_PAGE_BITS_16M
: TARGET_PAGE_BITS
;
849 ctx
->key
= !!(pr
? (slb
->vsid
& SLB_VSID_KP
)
850 : (slb
->vsid
& SLB_VSID_KS
));
852 ctx
->nx
= !!(slb
->vsid
& SLB_VSID_N
);
854 pageaddr
= eaddr
& ((1ULL << segment_bits
)
855 - (1ULL << target_page_bits
));
856 if (slb
->vsid
& SLB_VSID_B
) {
857 hash
= vsid
^ (vsid
<< 25) ^ (pageaddr
>> target_page_bits
);
859 hash
= vsid
^ (pageaddr
>> target_page_bits
);
861 /* Only 5 bits of the page index are used in the AVPN */
862 ctx
->ptem
= (slb
->vsid
& SLB_VSID_PTEM
) |
863 ((pageaddr
>> 16) & ((1ULL << segment_bits
) - 0x80));
865 #endif /* defined(TARGET_PPC64) */
867 target_ulong sr
, pgidx
;
869 sr
= env
->sr
[eaddr
>> 28];
870 ctx
->key
= (((sr
& 0x20000000) && (pr
!= 0)) ||
871 ((sr
& 0x40000000) && (pr
== 0))) ? 1 : 0;
872 ds
= sr
& 0x80000000 ? 1 : 0;
873 ctx
->nx
= sr
& 0x10000000 ? 1 : 0;
874 vsid
= sr
& 0x00FFFFFF;
875 target_page_bits
= TARGET_PAGE_BITS
;
876 LOG_MMU("Check segment v=" TARGET_FMT_lx
" %d " TARGET_FMT_lx
" nip="
877 TARGET_FMT_lx
" lr=" TARGET_FMT_lx
878 " ir=%d dr=%d pr=%d %d t=%d\n",
879 eaddr
, (int)(eaddr
>> 28), sr
, env
->nip
, env
->lr
, (int)msr_ir
,
880 (int)msr_dr
, pr
!= 0 ? 1 : 0, rw
, type
);
881 pgidx
= (eaddr
& ~SEGMENT_MASK_256M
) >> target_page_bits
;
883 ctx
->ptem
= (vsid
<< 7) | (pgidx
>> 10);
885 LOG_MMU("pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx
"\n",
886 ctx
->key
, ds
, ctx
->nx
, vsid
);
889 /* Check if instruction fetch is allowed, if needed */
890 if (type
!= ACCESS_CODE
|| ctx
->nx
== 0) {
891 /* Page address translation */
892 LOG_MMU("htab_base " TARGET_FMT_plx
" htab_mask " TARGET_FMT_plx
893 " hash " TARGET_FMT_plx
"\n",
894 env
->htab_base
, env
->htab_mask
, hash
);
896 ctx
->hash
[1] = ~hash
;
898 /* Initialize real address with an invalid value */
899 ctx
->raddr
= (target_phys_addr_t
)-1ULL;
900 if (unlikely(env
->mmu_model
== POWERPC_MMU_SOFT_6xx
||
901 env
->mmu_model
== POWERPC_MMU_SOFT_74xx
)) {
902 /* Software TLB search */
903 ret
= ppc6xx_tlb_check(env
, ctx
, eaddr
, rw
, type
);
905 LOG_MMU("0 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
906 " vsid=" TARGET_FMT_lx
" ptem=" TARGET_FMT_lx
907 " hash=" TARGET_FMT_plx
"\n",
908 env
->htab_base
, env
->htab_mask
, vsid
, ctx
->ptem
,
910 /* Primary table lookup */
911 ret
= find_pte(env
, ctx
, 0, rw
, type
, target_page_bits
);
913 /* Secondary table lookup */
914 if (eaddr
!= 0xEFFFFFFF) {
915 LOG_MMU("1 htab=" TARGET_FMT_plx
"/" TARGET_FMT_plx
916 " vsid=" TARGET_FMT_lx
" api=" TARGET_FMT_lx
917 " hash=" TARGET_FMT_plx
"\n", env
->htab_base
,
918 env
->htab_mask
, vsid
, ctx
->ptem
, ctx
->hash
[1]);
920 ret2
= find_pte(env
, ctx
, 1, rw
, type
,
927 #if defined(DUMP_PAGE_TABLES)
928 if (qemu_log_enabled()) {
929 target_phys_addr_t curaddr
;
930 uint32_t a0
, a1
, a2
, a3
;
932 qemu_log("Page table: " TARGET_FMT_plx
" len " TARGET_FMT_plx
933 "\n", sdr
, mask
+ 0x80);
934 for (curaddr
= sdr
; curaddr
< (sdr
+ mask
+ 0x80);
936 a0
= ldl_phys(curaddr
);
937 a1
= ldl_phys(curaddr
+ 4);
938 a2
= ldl_phys(curaddr
+ 8);
939 a3
= ldl_phys(curaddr
+ 12);
940 if (a0
!= 0 || a1
!= 0 || a2
!= 0 || a3
!= 0) {
941 qemu_log(TARGET_FMT_plx
": %08x %08x %08x %08x\n",
942 curaddr
, a0
, a1
, a2
, a3
);
948 LOG_MMU("No access allowed\n");
954 LOG_MMU("direct store...\n");
955 /* Direct-store segment : absolutely *BUGGY* for now */
957 /* Direct-store implies a 32-bit MMU.
958 * Check the Segment Register's bus unit ID (BUID).
960 sr
= env
->sr
[eaddr
>> 28];
961 if ((sr
& 0x1FF00000) >> 20 == 0x07f) {
962 /* Memory-forced I/O controller interface access */
963 /* If T=1 and BUID=x'07F', the 601 performs a memory access
964 * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
966 ctx
->raddr
= ((sr
& 0xF) << 28) | (eaddr
& 0x0FFFFFFF);
967 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
973 /* Integer load/store : only access allowed */
976 /* No code fetch is allowed in direct-store areas */
979 /* Floating point load/store */
982 /* lwarx, ldarx or srwcx. */
985 /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
986 /* Should make the instruction do no-op.
987 * As it already do no-op, it's quite easy :-)
995 qemu_log("ERROR: instruction should not need "
996 "address translation\n");
999 if ((rw
== 1 || ctx
->key
!= 1) && (rw
== 0 || ctx
->key
!= 0)) {
1010 /* Generic TLB check function for embedded PowerPC implementations */
1011 static int ppcemb_tlb_check(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1012 target_phys_addr_t
*raddrp
,
1013 target_ulong address
, uint32_t pid
, int ext
,
1018 /* Check valid flag */
1019 if (!(tlb
->prot
& PAGE_VALID
)) {
1022 mask
= ~(tlb
->size
- 1);
1023 LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx
" PID %u <=> " TARGET_FMT_lx
1024 " " TARGET_FMT_lx
" %u %x\n", __func__
, i
, address
, pid
, tlb
->EPN
,
1025 mask
, (uint32_t)tlb
->PID
, tlb
->prot
);
1027 if (tlb
->PID
!= 0 && tlb
->PID
!= pid
) {
1030 /* Check effective address */
1031 if ((address
& mask
) != tlb
->EPN
) {
1034 *raddrp
= (tlb
->RPN
& mask
) | (address
& ~mask
);
1035 #if (TARGET_PHYS_ADDR_BITS >= 36)
1037 /* Extend the physical address to 36 bits */
1038 *raddrp
|= (target_phys_addr_t
)(tlb
->RPN
& 0xF) << 32;
1045 /* Generic TLB search function for PowerPC embedded implementations */
1046 static int ppcemb_tlb_search(CPUPPCState
*env
, target_ulong address
,
1050 target_phys_addr_t raddr
;
1053 /* Default return value is no match */
1055 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1056 tlb
= &env
->tlb
.tlbe
[i
];
1057 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
, pid
, 0, i
) == 0) {
1066 /* Helpers specific to PowerPC 40x implementations */
1067 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState
*env
)
1072 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1073 tlb
= &env
->tlb
.tlbe
[i
];
1074 tlb
->prot
&= ~PAGE_VALID
;
1079 static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState
*env
,
1080 target_ulong eaddr
, uint32_t pid
)
1082 #if !defined(FLUSH_ALL_TLBS)
1084 target_phys_addr_t raddr
;
1085 target_ulong page
, end
;
1088 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1089 tlb
= &env
->tlb
.tlbe
[i
];
1090 if (ppcemb_tlb_check(env
, tlb
, &raddr
, eaddr
, pid
, 0, i
) == 0) {
1091 end
= tlb
->EPN
+ tlb
->size
;
1092 for (page
= tlb
->EPN
; page
< end
; page
+= TARGET_PAGE_SIZE
) {
1093 tlb_flush_page(env
, page
);
1095 tlb
->prot
&= ~PAGE_VALID
;
1100 ppc4xx_tlb_invalidate_all(env
);
1104 static int mmu40x_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1105 target_ulong address
, int rw
,
1109 target_phys_addr_t raddr
;
1110 int i
, ret
, zsel
, zpr
, pr
;
1113 raddr
= (target_phys_addr_t
)-1ULL;
1115 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1116 tlb
= &env
->tlb
.tlbe
[i
];
1117 if (ppcemb_tlb_check(env
, tlb
, &raddr
, address
,
1118 env
->spr
[SPR_40x_PID
], 0, i
) < 0) {
1121 zsel
= (tlb
->attr
>> 4) & 0xF;
1122 zpr
= (env
->spr
[SPR_40x_ZPR
] >> (30 - (2 * zsel
))) & 0x3;
1123 LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
1124 __func__
, i
, zsel
, zpr
, rw
, tlb
->attr
);
1125 /* Check execute enable bit */
1133 /* All accesses granted */
1134 ctx
->prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
1139 /* Raise Zone protection fault. */
1140 env
->spr
[SPR_40x_ESR
] = 1 << 22;
1148 /* Check from TLB entry */
1149 ctx
->prot
= tlb
->prot
;
1150 ret
= check_prot(ctx
->prot
, rw
, access_type
);
1152 env
->spr
[SPR_40x_ESR
] = 0;
1158 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1159 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1164 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1165 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1170 void store_40x_sler(CPUPPCState
*env
, uint32_t val
)
1172 /* XXX: TO BE FIXED */
1173 if (val
!= 0x00000000) {
1174 cpu_abort(env
, "Little-endian regions are not supported by now\n");
1176 env
->spr
[SPR_405_SLER
] = val
;
1179 static inline int mmubooke_check_tlb(CPUPPCState
*env
, ppcemb_tlb_t
*tlb
,
1180 target_phys_addr_t
*raddr
, int *prot
,
1181 target_ulong address
, int rw
,
1182 int access_type
, int i
)
1186 if (ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1187 env
->spr
[SPR_BOOKE_PID
],
1188 !env
->nb_pids
, i
) >= 0) {
1192 if (env
->spr
[SPR_BOOKE_PID1
] &&
1193 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1194 env
->spr
[SPR_BOOKE_PID1
], 0, i
) >= 0) {
1198 if (env
->spr
[SPR_BOOKE_PID2
] &&
1199 ppcemb_tlb_check(env
, tlb
, raddr
, address
,
1200 env
->spr
[SPR_BOOKE_PID2
], 0, i
) >= 0) {
1204 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1210 prot2
= tlb
->prot
& 0xF;
1212 prot2
= (tlb
->prot
>> 4) & 0xF;
1215 /* Check the address space */
1216 if (access_type
== ACCESS_CODE
) {
1217 if (msr_ir
!= (tlb
->attr
& 1)) {
1218 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1223 if (prot2
& PAGE_EXEC
) {
1224 LOG_SWTLB("%s: good TLB!\n", __func__
);
1228 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1231 if (msr_dr
!= (tlb
->attr
& 1)) {
1232 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1237 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1238 LOG_SWTLB("%s: found TLB!\n", __func__
);
1242 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1249 static int mmubooke_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1250 target_ulong address
, int rw
,
1254 target_phys_addr_t raddr
;
1258 raddr
= (target_phys_addr_t
)-1ULL;
1259 for (i
= 0; i
< env
->nb_tlb
; i
++) {
1260 tlb
= &env
->tlb
.tlbe
[i
];
1261 ret
= mmubooke_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
, rw
,
1270 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1271 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1274 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1275 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1281 void booke206_flush_tlb(CPUPPCState
*env
, int flags
, const int check_iprot
)
1285 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
1287 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1288 if (flags
& (1 << i
)) {
1289 tlb_size
= booke206_tlb_size(env
, i
);
1290 for (j
= 0; j
< tlb_size
; j
++) {
1291 if (!check_iprot
|| !(tlb
[j
].mas1
& MAS1_IPROT
)) {
1292 tlb
[j
].mas1
&= ~MAS1_VALID
;
1296 tlb
+= booke206_tlb_size(env
, i
);
1302 target_phys_addr_t
booke206_tlb_to_page_size(CPUPPCState
*env
,
1307 tlbm_size
= (tlb
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
1309 return 1024ULL << tlbm_size
;
1312 /* TLB check function for MAS based SoftTLBs */
1313 int ppcmas_tlb_check(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1314 target_phys_addr_t
*raddrp
,
1315 target_ulong address
, uint32_t pid
)
1320 /* Check valid flag */
1321 if (!(tlb
->mas1
& MAS1_VALID
)) {
1325 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
1326 LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx
" PID=0x%x MAS1=0x%x MAS2=0x%"
1327 PRIx64
" mask=0x" TARGET_FMT_lx
" MAS7_3=0x%" PRIx64
" MAS8=%x\n",
1328 __func__
, address
, pid
, tlb
->mas1
, tlb
->mas2
, mask
, tlb
->mas7_3
,
1332 tlb_pid
= (tlb
->mas1
& MAS1_TID_MASK
) >> MAS1_TID_SHIFT
;
1333 if (tlb_pid
!= 0 && tlb_pid
!= pid
) {
1337 /* Check effective address */
1338 if ((address
& mask
) != (tlb
->mas2
& MAS2_EPN_MASK
)) {
1343 *raddrp
= (tlb
->mas7_3
& mask
) | (address
& ~mask
);
1349 static int mmubooke206_check_tlb(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
,
1350 target_phys_addr_t
*raddr
, int *prot
,
1351 target_ulong address
, int rw
,
1357 if (ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1358 env
->spr
[SPR_BOOKE_PID
]) >= 0) {
1362 if (env
->spr
[SPR_BOOKE_PID1
] &&
1363 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1364 env
->spr
[SPR_BOOKE_PID1
]) >= 0) {
1368 if (env
->spr
[SPR_BOOKE_PID2
] &&
1369 ppcmas_tlb_check(env
, tlb
, raddr
, address
,
1370 env
->spr
[SPR_BOOKE_PID2
]) >= 0) {
1374 LOG_SWTLB("%s: TLB entry not found\n", __func__
);
1380 if (tlb
->mas7_3
& MAS3_UR
) {
1383 if (tlb
->mas7_3
& MAS3_UW
) {
1384 prot2
|= PAGE_WRITE
;
1386 if (tlb
->mas7_3
& MAS3_UX
) {
1390 if (tlb
->mas7_3
& MAS3_SR
) {
1393 if (tlb
->mas7_3
& MAS3_SW
) {
1394 prot2
|= PAGE_WRITE
;
1396 if (tlb
->mas7_3
& MAS3_SX
) {
1401 /* Check the address space and permissions */
1402 if (access_type
== ACCESS_CODE
) {
1403 if (msr_ir
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1404 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1409 if (prot2
& PAGE_EXEC
) {
1410 LOG_SWTLB("%s: good TLB!\n", __func__
);
1414 LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__
, prot2
);
1417 if (msr_dr
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
1418 LOG_SWTLB("%s: AS doesn't match\n", __func__
);
1423 if ((!rw
&& prot2
& PAGE_READ
) || (rw
&& (prot2
& PAGE_WRITE
))) {
1424 LOG_SWTLB("%s: found TLB!\n", __func__
);
1428 LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__
, prot2
);
1435 static int mmubooke206_get_physical_address(CPUPPCState
*env
, mmu_ctx_t
*ctx
,
1436 target_ulong address
, int rw
,
1440 target_phys_addr_t raddr
;
1444 raddr
= (target_phys_addr_t
)-1ULL;
1446 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
1447 int ways
= booke206_tlb_ways(env
, i
);
1449 for (j
= 0; j
< ways
; j
++) {
1450 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
1454 ret
= mmubooke206_check_tlb(env
, tlb
, &raddr
, &ctx
->prot
, address
,
1466 LOG_SWTLB("%s: access granted " TARGET_FMT_lx
" => " TARGET_FMT_plx
1467 " %d %d\n", __func__
, address
, ctx
->raddr
, ctx
->prot
,
1470 LOG_SWTLB("%s: access refused " TARGET_FMT_lx
" => " TARGET_FMT_plx
1471 " %d %d\n", __func__
, address
, raddr
, ctx
->prot
, ret
);
1477 static const char *book3e_tsize_to_str
[32] = {
1478 "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
1479 "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
1480 "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
/* Dump the BookE (ppcemb) software TLB to F for the monitor's
 * "info tlb" command.  Skips invalid entries; sizes are printed in
 * KiB ("k") or MiB ("M"). */
static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                              CPUPPCState *env)
{
    ppcemb_tlb_t *entry;
    int i;

    /* Under KVM the shadow TLB lives in the kernel unless the software
     * TLB interface was negotiated, so there is nothing to print. */
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");
        return;
    }

    cpu_fprintf(f, "\nTLB:\n");
    cpu_fprintf(f, "Effective          Physical           Size PID   Prot     "
                "Attr\n");

    entry = &env->tlb.tlbe[0];
    for (i = 0; i < env->nb_tlb; i++, entry++) {
        target_phys_addr_t ea, pa;
        target_ulong mask;
        uint64_t size = (uint64_t)entry->size;
        char size_buf[20];

        /* Check valid flag */
        if (!(entry->prot & PAGE_VALID)) {
            continue;
        }

        /* Mask EPN/RPN down to the page boundary of this entry's size. */
        mask = ~(entry->size - 1);
        ea = entry->EPN & mask;
        pa = entry->RPN & mask;
#if (TARGET_PHYS_ADDR_BITS >= 36)
        /* Extend the physical address to 36 bits */
        pa |= (target_phys_addr_t)(entry->RPN & 0xF) << 32;
#endif
        size /= 1024;   /* entry->size is in bytes; print KiB/MiB */
        if (size >= 1024) {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
        } else {
            snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
        }

        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
                    (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
                    entry->prot, entry->attr);
    }
}
/* Dump one BookE 2.06 TLB array (TLBn) to F: one line per valid entry
 * showing effective/physical address, size, TID, TS, supervisor/user
 * RWX permissions, WIMGE attributes and the U0-U3 user bits.
 * @offset: index of this array's first entry in env->tlb.tlbm
 * @tlbsize: number of entries in the array */
static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
                                     CPUPPCState *env, int tlbn, int offset,
                                     int tlbsize)
{
    ppcmas_tlb_t *entry;
    int i;

    cpu_fprintf(f, "\nTLB%d:\n", tlbn);
    cpu_fprintf(f, "Effective          Physical           Size TID   TS SRWX"
                " URWX WIMGE U0123\n");

    entry = &env->tlb.tlbm[offset];
    for (i = 0; i < tlbsize; i++, entry++) {
        target_phys_addr_t ea, pa, size;
        int tsize;

        if (!(entry->mas1 & MAS1_VALID)) {
            continue;
        }

        /* Page size is 1K * 4^TSIZE; mask addresses to that boundary. */
        tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 1024ULL << tsize;
        ea = entry->mas2 & ~(size - 1);
        pa = entry->mas7_3 & ~(size - 1);

        cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u  S%c%c%c"
                    " U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
                    (uint64_t)ea, (uint64_t)pa,
                    book3e_tsize_to_str[tsize],
                    (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
                    (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
                    entry->mas7_3 & MAS3_SR ? 'R' : '-',
                    entry->mas7_3 & MAS3_SW ? 'W' : '-',
                    entry->mas7_3 & MAS3_SX ? 'X' : '-',
                    entry->mas7_3 & MAS3_UR ? 'R' : '-',
                    entry->mas7_3 & MAS3_UW ? 'W' : '-',
                    entry->mas7_3 & MAS3_UX ? 'X' : '-',
                    entry->mas2 & MAS2_W ? 'W' : '-',
                    entry->mas2 & MAS2_I ? 'I' : '-',
                    entry->mas2 & MAS2_M ? 'M' : '-',
                    entry->mas2 & MAS2_G ? 'G' : '-',
                    entry->mas2 & MAS2_E ? 'E' : '-',
                    entry->mas7_3 & MAS3_U0 ? '0' : '-',
                    entry->mas7_3 & MAS3_U1 ? '1' : '-',
                    entry->mas7_3 & MAS3_U2 ? '2' : '-',
                    entry->mas7_3 & MAS3_U3 ? '3' : '-');
    }
}
/* Dump all BookE 2.06 TLB arrays by iterating over the per-array sizes
 * and delegating each non-empty array to mmubooke206_dump_one_tlb(). */
static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                                 CPUPPCState *env)
{
    int offset = 0;   /* running index of the first entry of TLBi in tlbm[] */
    int i;

    /* Shadow TLB is in-kernel under KVM unless software TLB was enabled. */
    if (kvm_enabled() && !env->kvm_sw_tlb) {
        cpu_fprintf(f, "Cannot access KVM TLB\n");
        return;
    }

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int size = booke206_tlb_size(env, i);

        if (size == 0) {
            continue;
        }

        mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
        offset += size;
    }
}
1603 #if defined(TARGET_PPC64)
/* Dump the 64-bit hash-MMU SLB (ESID/VSID pairs) to F, skipping
 * all-zero (unused) entries.  State is synchronized from the
 * accelerator first so KVM-maintained SLBs are visible. */
static void mmubooks_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
                              CPUPPCState *env)
{
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(env);

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}
/* Monitor entry point: dispatch "info tlb"-style dumps to the dumper
 * matching the CPU's MMU model; log an UNIMP message for models that
 * have no dumper yet. */
void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_BOOKE:
        mmubooke_dump_mmu(f, cpu_fprintf, env);
        break;
    case POWERPC_MMU_BOOKE206:
        mmubooke206_dump_mmu(f, cpu_fprintf, env);
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        mmubooks_dump_mmu(f, cpu_fprintf, env);
        break;
#endif
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
    }
}
/* Handle an access with address translation disabled (real mode):
 * the effective address is used as the physical address and the
 * protection depends only on the MMU model.  Returns 0 on success,
 * -2 when a 403-family PLB protection check forbids the write. */
static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
                                 target_ulong eaddr, int rw)
{
    int in_plb, ret;

    ret = 0;
    ctx->raddr = eaddr;
    ctx->prot = PAGE_READ | PAGE_EXEC;
    switch (env->mmu_model) {
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_REAL:
    case POWERPC_MMU_BOOKE:
        /* Real mode is fully read/write/execute on these models. */
        ctx->prot |= PAGE_WRITE;
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        /* Real address are 60 bits long */
        ctx->raddr &= 0x0FFFFFFFFFFFFFFFULL;
        ctx->prot |= PAGE_WRITE;
        break;
#endif
    case POWERPC_MMU_SOFT_4xx_Z:
        if (unlikely(msr_pe != 0)) {
            /* 403 family add some particular protections,
             * using PBL/PBU registers for accesses with no translation.
             */
            in_plb =
                /* Check PLB validity */
                (env->pb[0] < env->pb[1] &&
                 /* and address in plb area */
                 eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
                (env->pb[2] < env->pb[3] &&
                 eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
            if (in_plb ^ msr_px) {
                /* Access in protected area */
                if (rw == 1) {
                    /* Access is not allowed */
                    ret = -2;
                }
            } else {
                /* Read-write access is allowed */
                ctx->prot |= PAGE_WRITE;
            }
        }
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        /* BookE 2.06 always translates; we should never get here. */
        cpu_abort(env, "BookE 2.06 MMU doesn't have physical real mode\n");
        break;
    default:
        cpu_abort(env, "Unknown or invalid MMU model\n");
        return -1;
    }

    return ret;
}
/* Top-level effective->physical translation dispatcher.
 * When translation is off (MSR[IR]/MSR[DR] clear for the access type),
 * BookE models still translate (IS/DS only select the address space);
 * all others fall back to check_physical().  With translation on,
 * dispatch by MMU model: BATs then segments/hash for classic 32/64-bit
 * models, software TLBs for 4xx/BookE.  Returns 0 on success, negative
 * error codes otherwise. */
int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr,
                         int rw, int access_type)
{
    int ret;

#if 0
    qemu_log("%s\n", __func__);
#endif
    if ((access_type == ACCESS_CODE && msr_ir == 0) ||
        (access_type != ACCESS_CODE && msr_dr == 0)) {
        if (env->mmu_model == POWERPC_MMU_BOOKE) {
            /* The BookE MMU always performs address translation. The
               IS and DS bits only affect the address space. */
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
        } else if (env->mmu_model == POWERPC_MMU_BOOKE206) {
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
        } else {
            /* No address translation. */
            ret = check_physical(env, ctx, eaddr, rw);
        }
    } else {
        ret = -1;
        switch (env->mmu_model) {
        case POWERPC_MMU_32B:
        case POWERPC_MMU_601:
        case POWERPC_MMU_SOFT_6xx:
        case POWERPC_MMU_SOFT_74xx:
            /* Try to find a BAT */
            if (env->nb_BATs != 0) {
                ret = get_bat(env, ctx, eaddr, rw, access_type);
            }
            /* fall through to the segment lookup below */
#if defined(TARGET_PPC64)
        case POWERPC_MMU_620:
        case POWERPC_MMU_64B:
        case POWERPC_MMU_2_06:
#endif
            if (ret < 0) {
                /* We didn't match any BAT entry or don't have BATs */
                ret = get_segment(env, ctx, eaddr, rw, access_type);
            }
            break;
        case POWERPC_MMU_SOFT_4xx:
        case POWERPC_MMU_SOFT_4xx_Z:
            ret = mmu40x_get_physical_address(env, ctx, eaddr,
                                              rw, access_type);
            break;
        case POWERPC_MMU_BOOKE:
            ret = mmubooke_get_physical_address(env, ctx, eaddr,
                                                rw, access_type);
            break;
        case POWERPC_MMU_BOOKE206:
            ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
                                                   access_type);
            break;
        case POWERPC_MMU_MPC8xx:
            /* XXX: TODO */
            cpu_abort(env, "MPC8xx MMU model is not implemented\n");
            break;
        case POWERPC_MMU_REAL:
            cpu_abort(env, "PowerPC in real mode do not do any translation\n");
            return -1;
        default:
            cpu_abort(env, "Unknown or invalid MMU model\n");
            return -1;
        }
    }
#if 0
    qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
             __func__, eaddr, ret, ctx->raddr);
#endif

    return ret;
}
/* Debugger hook: translate ADDR without raising faults.  Returns the
 * page-aligned physical address, or -1 if translation fails. */
target_phys_addr_t cpu_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;

    /* rw=0/ACCESS_INT: probe as a supervisor data read. */
    if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
        return -1;
    }

    return ctx.raddr & TARGET_PAGE_MASK;
}
/* On a BookE 2.06 TLB miss, preload the MAS registers with the default
 * values the guest's miss handler expects: defaults from MAS4, the
 * faulting EPN, the address space and PID selection, and the
 * round-robin next-victim way in MAS0.
 * @rw: 2 for instruction fetch, otherwise data access. */
static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
                                         int rw)
{
    /* Seed MAS0-2 from the MAS4 default fields; clear the rest. */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS6] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    /* AS: use the translation space that was active for this access. */
    if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
        env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
    }

    env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
    env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;

    /* TID: select one of PID0/PID1/PID2 per the MAS4 TIDSELD field. */
    switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
    case MAS4_TIDSELD_PID0:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID1:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
        break;
    case MAS4_TIDSELD_PID2:
        env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;
        break;
    }

    env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}
1838 /* Perform address translation */
1839 int cpu_ppc_handle_mmu_fault(CPUPPCState
*env
, target_ulong address
, int rw
,
1849 access_type
= ACCESS_CODE
;
1852 access_type
= env
->access_type
;
1854 ret
= get_physical_address(env
, &ctx
, address
, rw
, access_type
);
1856 tlb_set_page(env
, address
& TARGET_PAGE_MASK
,
1857 ctx
.raddr
& TARGET_PAGE_MASK
, ctx
.prot
,
1858 mmu_idx
, TARGET_PAGE_SIZE
);
1860 } else if (ret
< 0) {
1862 if (access_type
== ACCESS_CODE
) {
1865 /* No matches in page tables or TLB */
1866 switch (env
->mmu_model
) {
1867 case POWERPC_MMU_SOFT_6xx
:
1868 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1869 env
->error_code
= 1 << 18;
1870 env
->spr
[SPR_IMISS
] = address
;
1871 env
->spr
[SPR_ICMP
] = 0x80000000 | ctx
.ptem
;
1873 case POWERPC_MMU_SOFT_74xx
:
1874 env
->exception_index
= POWERPC_EXCP_IFTLB
;
1876 case POWERPC_MMU_SOFT_4xx
:
1877 case POWERPC_MMU_SOFT_4xx_Z
:
1878 env
->exception_index
= POWERPC_EXCP_ITLB
;
1879 env
->error_code
= 0;
1880 env
->spr
[SPR_40x_DEAR
] = address
;
1881 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1883 case POWERPC_MMU_32B
:
1884 case POWERPC_MMU_601
:
1885 #if defined(TARGET_PPC64)
1886 case POWERPC_MMU_620
:
1887 case POWERPC_MMU_64B
:
1888 case POWERPC_MMU_2_06
:
1890 env
->exception_index
= POWERPC_EXCP_ISI
;
1891 env
->error_code
= 0x40000000;
1893 case POWERPC_MMU_BOOKE206
:
1894 booke206_update_mas_tlb_miss(env
, address
, rw
);
1896 case POWERPC_MMU_BOOKE
:
1897 env
->exception_index
= POWERPC_EXCP_ITLB
;
1898 env
->error_code
= 0;
1899 env
->spr
[SPR_BOOKE_DEAR
] = address
;
1901 case POWERPC_MMU_MPC8xx
:
1903 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
1905 case POWERPC_MMU_REAL
:
1906 cpu_abort(env
, "PowerPC in real mode should never raise "
1907 "any MMU exceptions\n");
1910 cpu_abort(env
, "Unknown or invalid MMU model\n");
1915 /* Access rights violation */
1916 env
->exception_index
= POWERPC_EXCP_ISI
;
1917 env
->error_code
= 0x08000000;
1920 /* No execute protection violation */
1921 if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
1922 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
1923 env
->spr
[SPR_BOOKE_ESR
] = 0x00000000;
1925 env
->exception_index
= POWERPC_EXCP_ISI
;
1926 env
->error_code
= 0x10000000;
1929 /* Direct store exception */
1930 /* No code fetch is allowed in direct-store areas */
1931 env
->exception_index
= POWERPC_EXCP_ISI
;
1932 env
->error_code
= 0x10000000;
1934 #if defined(TARGET_PPC64)
1936 /* No match in segment table */
1937 if (env
->mmu_model
== POWERPC_MMU_620
) {
1938 env
->exception_index
= POWERPC_EXCP_ISI
;
1939 /* XXX: this might be incorrect */
1940 env
->error_code
= 0x40000000;
1942 env
->exception_index
= POWERPC_EXCP_ISEG
;
1943 env
->error_code
= 0;
1951 /* No matches in page tables or TLB */
1952 switch (env
->mmu_model
) {
1953 case POWERPC_MMU_SOFT_6xx
:
1955 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1956 env
->error_code
= 1 << 16;
1958 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1959 env
->error_code
= 0;
1961 env
->spr
[SPR_DMISS
] = address
;
1962 env
->spr
[SPR_DCMP
] = 0x80000000 | ctx
.ptem
;
1964 env
->error_code
|= ctx
.key
<< 19;
1965 env
->spr
[SPR_HASH1
] = env
->htab_base
+
1966 get_pteg_offset(env
, ctx
.hash
[0], HASH_PTE_SIZE_32
);
1967 env
->spr
[SPR_HASH2
] = env
->htab_base
+
1968 get_pteg_offset(env
, ctx
.hash
[1], HASH_PTE_SIZE_32
);
1970 case POWERPC_MMU_SOFT_74xx
:
1972 env
->exception_index
= POWERPC_EXCP_DSTLB
;
1974 env
->exception_index
= POWERPC_EXCP_DLTLB
;
1977 /* Implement LRU algorithm */
1978 env
->error_code
= ctx
.key
<< 19;
1979 env
->spr
[SPR_TLBMISS
] = (address
& ~((target_ulong
)0x3)) |
1980 ((env
->last_way
+ 1) & (env
->nb_ways
- 1));
1981 env
->spr
[SPR_PTEHI
] = 0x80000000 | ctx
.ptem
;
1983 case POWERPC_MMU_SOFT_4xx
:
1984 case POWERPC_MMU_SOFT_4xx_Z
:
1985 env
->exception_index
= POWERPC_EXCP_DTLB
;
1986 env
->error_code
= 0;
1987 env
->spr
[SPR_40x_DEAR
] = address
;
1989 env
->spr
[SPR_40x_ESR
] = 0x00800000;
1991 env
->spr
[SPR_40x_ESR
] = 0x00000000;
1994 case POWERPC_MMU_32B
:
1995 case POWERPC_MMU_601
:
1996 #if defined(TARGET_PPC64)
1997 case POWERPC_MMU_620
:
1998 case POWERPC_MMU_64B
:
1999 case POWERPC_MMU_2_06
:
2001 env
->exception_index
= POWERPC_EXCP_DSI
;
2002 env
->error_code
= 0;
2003 env
->spr
[SPR_DAR
] = address
;
2005 env
->spr
[SPR_DSISR
] = 0x42000000;
2007 env
->spr
[SPR_DSISR
] = 0x40000000;
2010 case POWERPC_MMU_MPC8xx
:
2012 cpu_abort(env
, "MPC8xx MMU model is not implemented\n");
2014 case POWERPC_MMU_BOOKE206
:
2015 booke206_update_mas_tlb_miss(env
, address
, rw
);
2017 case POWERPC_MMU_BOOKE
:
2018 env
->exception_index
= POWERPC_EXCP_DTLB
;
2019 env
->error_code
= 0;
2020 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2021 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2023 case POWERPC_MMU_REAL
:
2024 cpu_abort(env
, "PowerPC in real mode should never raise "
2025 "any MMU exceptions\n");
2028 cpu_abort(env
, "Unknown or invalid MMU model\n");
2033 /* Access rights violation */
2034 env
->exception_index
= POWERPC_EXCP_DSI
;
2035 env
->error_code
= 0;
2036 if (env
->mmu_model
== POWERPC_MMU_SOFT_4xx
2037 || env
->mmu_model
== POWERPC_MMU_SOFT_4xx_Z
) {
2038 env
->spr
[SPR_40x_DEAR
] = address
;
2040 env
->spr
[SPR_40x_ESR
] |= 0x00800000;
2042 } else if ((env
->mmu_model
== POWERPC_MMU_BOOKE
) ||
2043 (env
->mmu_model
== POWERPC_MMU_BOOKE206
)) {
2044 env
->spr
[SPR_BOOKE_DEAR
] = address
;
2045 env
->spr
[SPR_BOOKE_ESR
] = rw
? ESR_ST
: 0;
2047 env
->spr
[SPR_DAR
] = address
;
2049 env
->spr
[SPR_DSISR
] = 0x0A000000;
2051 env
->spr
[SPR_DSISR
] = 0x08000000;
2056 /* Direct store exception */
2057 switch (access_type
) {
2059 /* Floating point load/store */
2060 env
->exception_index
= POWERPC_EXCP_ALIGN
;
2061 env
->error_code
= POWERPC_EXCP_ALIGN_FP
;
2062 env
->spr
[SPR_DAR
] = address
;
2065 /* lwarx, ldarx or stwcx. */
2066 env
->exception_index
= POWERPC_EXCP_DSI
;
2067 env
->error_code
= 0;
2068 env
->spr
[SPR_DAR
] = address
;
2070 env
->spr
[SPR_DSISR
] = 0x06000000;
2072 env
->spr
[SPR_DSISR
] = 0x04000000;
2076 /* eciwx or ecowx */
2077 env
->exception_index
= POWERPC_EXCP_DSI
;
2078 env
->error_code
= 0;
2079 env
->spr
[SPR_DAR
] = address
;
2081 env
->spr
[SPR_DSISR
] = 0x06100000;
2083 env
->spr
[SPR_DSISR
] = 0x04100000;
2087 printf("DSI: invalid exception (%d)\n", ret
);
2088 env
->exception_index
= POWERPC_EXCP_PROGRAM
;
2090 POWERPC_EXCP_INVAL
| POWERPC_EXCP_INVAL_INVAL
;
2091 env
->spr
[SPR_DAR
] = address
;
2095 #if defined(TARGET_PPC64)
2097 /* No match in segment table */
2098 if (env
->mmu_model
== POWERPC_MMU_620
) {
2099 env
->exception_index
= POWERPC_EXCP_DSI
;
2100 env
->error_code
= 0;
2101 env
->spr
[SPR_DAR
] = address
;
2102 /* XXX: this might be incorrect */
2104 env
->spr
[SPR_DSISR
] = 0x42000000;
2106 env
->spr
[SPR_DSISR
] = 0x40000000;
2109 env
->exception_index
= POWERPC_EXCP_DSEG
;
2110 env
->error_code
= 0;
2111 env
->spr
[SPR_DAR
] = address
;
2118 printf("%s: set exception to %d %02x\n", __func__
,
2119 env
->exception
, env
->error_code
);
2127 /*****************************************************************************/
2128 /* BATs management */
2129 #if !defined(FLUSH_ALL_TLBS)
/* Flush every QEMU TLB page covered by a BAT mapping.
 * @BATu: upper BAT register (BEPI field gives the base)
 * @mask: block-length mask derived from the BAT BL field */
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;          /* BEPI: effective base address */
    end = base + mask + 0x00020000;     /* block length = mask + 128K */
    LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
             TARGET_FMT_lx ")\n", base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(env, page);
    }
    LOG_BATS("Flush done\n");
}
/* Trace a BAT register store (LOG_BATS only; no architectural effect).
 * @ID: 'I' or 'D'; @ul: 0 = upper half, 1 = lower half; @nr: BAT index */
static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
             nr, ul == 0 ? 'u' : 'l', value, env->nip);
}
/* Store to an upper instruction BAT register.  Invalidates the pages
 * covered by both the old and the new mapping, and masks BEPI/BRPN
 * bits that the BL field makes don't-care so lookups can compare
 * directly. */
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        /* BL field (bits 14-25 of the written value) -> address mask. */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
/* Store to a lower instruction BAT register.  No TLB invalidation is
 * performed here; the flush happens on the matching upper-BAT store. */
void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}
/* Store to an upper data BAT register; mirror of helper_store_ibatu
 * operating on DBAT[0]/DBAT[1]. */
void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;   /* BL field -> address mask */
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env, 1);
#endif
    }
}
/* Store to a lower data BAT register; like helper_store_ibatl, the
 * TLB flush is deferred to the upper-BAT store. */
void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}
/* 601 upper BAT store.  The 601 has unified BATs, so IBAT and DBAT
 * are kept in sync; validity lives in the *lower* BAT (bit 0x40) and
 * the block mask comes from the lower BAT's BL field (<< 17). */
void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
        if (env->IBAT[1][nr] & 0x40) {
            /* Invalidate BAT only if it is valid */
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        /* When storing valid upper BAT, mask BEPI and BRPN
         * and invalidate all TLBs covered by this BAT
         */
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[0][nr] = env->IBAT[0][nr];   /* unified BATs on the 601 */
        if (env->IBAT[1][nr] & 0x40) {
#if !defined(FLUSH_ALL_TLBS)
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
#if defined(FLUSH_ALL_TLBS)
        if (do_inval) {
            tlb_flush(env, 1);
        }
#endif
    }
}
/* 601 lower BAT store.  Invalidates the old mapping (if it was valid)
 * and the new one (if the written value is valid), then mirrors the
 * value into the data BAT since 601 BATs are unified. */
void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;
#if defined(FLUSH_ALL_TLBS)
    int do_inval;
#endif

    dump_store_bat(env, 'I', 1, nr, value);
    if (env->IBAT[1][nr] != value) {
#if defined(FLUSH_ALL_TLBS)
        do_inval = 0;
#endif
        if (env->IBAT[1][nr] & 0x40) {          /* old mapping was valid */
#if !defined(FLUSH_ALL_TLBS)
            mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        if (value & 0x40) {                     /* new mapping is valid */
#if !defined(FLUSH_ALL_TLBS)
            mask = (value << 17) & 0x0FFE0000UL;
            do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
            do_inval = 1;
#endif
        }
        env->IBAT[1][nr] = value;
        env->DBAT[1][nr] = value;
#if defined(FLUSH_ALL_TLBS)
        if (do_inval) {
            tlb_flush(env, 1);
        }
#endif
    }
}
2297 /*****************************************************************************/
2298 /* TLB management */
/* Invalidate all translations, dispatching per MMU model: software
 * TLB models flush their own arrays, hash/BookE models flush QEMU's
 * TLB wholesale. */
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env, 1);
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
#endif /* defined(TARGET_PPC64) */
        tlb_flush(env, 1);
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
}
/* Invalidate the translation(s) for one effective address (tlbie).
 * With FLUSH_ALL_TLBS defined this degenerates to a full flush. */
void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
    case POWERPC_MMU_SOFT_74xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        if (env->id_tlbs == 1) {
            /* Split I/D TLBs: also invalidate the instruction side. */
            ppc6xx_tlb_invalidate_virt(env, addr, 1);
        }
        break;
    case POWERPC_MMU_SOFT_4xx:
    case POWERPC_MMU_SOFT_4xx_Z:
        ppc4xx_tlb_invalidate_virt(env, addr, env->spr[SPR_40x_PID]);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env, "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env, "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        /* XXX: TODO */
        cpu_abort(env, "BookE MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE206:
        /* XXX: TODO */
        cpu_abort(env, "BookE 2.06 MMU model is not implemented\n");
        break;
    case POWERPC_MMU_32B:
    case POWERPC_MMU_601:
        /* tlbie invalidate TLBs for all segments */
        addr &= ~((target_ulong)-1ULL << 28);
        /* XXX: this case should be optimized,
         * giving a mask to tlb_flush_page
         */
        tlb_flush_page(env, addr | (0x0 << 28));
        tlb_flush_page(env, addr | (0x1 << 28));
        tlb_flush_page(env, addr | (0x2 << 28));
        tlb_flush_page(env, addr | (0x3 << 28));
        tlb_flush_page(env, addr | (0x4 << 28));
        tlb_flush_page(env, addr | (0x5 << 28));
        tlb_flush_page(env, addr | (0x6 << 28));
        tlb_flush_page(env, addr | (0x7 << 28));
        tlb_flush_page(env, addr | (0x8 << 28));
        tlb_flush_page(env, addr | (0x9 << 28));
        tlb_flush_page(env, addr | (0xA << 28));
        tlb_flush_page(env, addr | (0xB << 28));
        tlb_flush_page(env, addr | (0xC << 28));
        tlb_flush_page(env, addr | (0xD << 28));
        tlb_flush_page(env, addr | (0xE << 28));
        tlb_flush_page(env, addr | (0xF << 28));
        break;
#if defined(TARGET_PPC64)
    case POWERPC_MMU_620:
    case POWERPC_MMU_64B:
    case POWERPC_MMU_2_06:
        /* tlbie invalidate TLBs for all segments */
        /* XXX: given the fact that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        tlb_flush(env, 1);
        break;
#endif /* defined(TARGET_PPC64) */
    default:
        /* XXX: TODO */
        cpu_abort(env, "Unknown MMU model\n");
        break;
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}
2416 /*****************************************************************************/
2417 /* Special registers manipulation */
2418 #if defined(TARGET_PPC64)
/* Store the Address Space Register (64-bit 620 bridge); a change
 * invalidates all cached translations.
 * NOTE(review): assignment/flush body restored (dropped in extraction) —
 * confirm against the original file. */
void ppc_store_asr(CPUPPCState *env, target_ulong value)
{
    if (env->asr != value) {
        env->asr = value;
        tlb_flush(env, 1);
    }
}
/* Store SDR1 (hash page table base/size).  Recomputes htab_base and
 * htab_mask from the 64-bit (HTABORG/HTABSIZE) or 32-bit
 * (HTABORG/HTABMASK) layout and flushes all translations. */
void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
{
    LOG_MMU("%s: " TARGET_FMT_lx "\n", __func__, value);
    if (env->spr[SPR_SDR1] != value) {
        env->spr[SPR_SDR1] = value;
#if defined(TARGET_PPC64)
        if (env->mmu_model & POWERPC_MMU_64) {
            target_ulong htabsize = value & SDR_64_HTABSIZE;

            if (htabsize > 28) {
                /* Architected maximum HTABSIZE is 28; clamp and warn. */
                fprintf(stderr, "Invalid HTABSIZE 0x" TARGET_FMT_lx
                        " stored in SDR1\n", htabsize);
                htabsize = 28;
            }
            env->htab_mask = (1ULL << (htabsize + 18)) - 1;
            env->htab_base = value & SDR_64_HTABORG;
        } else
#endif /* defined(TARGET_PPC64) */
        {
            /* FIXME: Should check for valid HTABMASK values */
            env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
            env->htab_base = value & SDR_32_HTABORG;
        }
        tlb_flush(env, 1);
    }
}
2455 /* Segment registers load and store */
/* Read a segment register.  On 64-bit hash MMUs segment registers do
 * not exist (the SLB replaces them), so return 0 there. */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}
/* Write a segment register.  On 64-bit hash MMUs this is emulated by
 * building an equivalent SLB entry (mtsr compatibility mode); on
 * 32-bit MMUs a changed SR flushes the whole TLB, which is cheaper
 * than invalidating the 64K pages of the 256MB segment one by one. */
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    LOG_MMU("%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
            (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (env->mmu_model & POWERPC_MMU_64) {
        uint64_t rb = 0, rs = 0;

        /* ESID = srnum */
        rb |= ((uint32_t)srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= SLB_ESID_V;
        /* Index = ESID */
        rb |= (uint32_t)srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(env, rb, rs);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
/* Invalidating 256MB of virtual memory in 4kB pages is way longer than
   flusing the whole TLB. */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env, page);
            }
        }
#else
        tlb_flush(env, 1);
#endif
    }
}
2509 #endif /* !defined(CONFIG_USER_ONLY) */
2511 #if !defined(CONFIG_USER_ONLY)
2512 /* SLB management */
2513 #if defined(TARGET_PPC64)
/* slbmte helper: install an SLB entry; raise a program interrupt with
 * the "invalid operation" code if the RB/RS encoding is rejected. */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}
/* slbmfee helper: read the ESID half of an SLB entry selected by RB;
 * raises a program interrupt on an invalid index and returns 0. */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
/* slbmfev helper: read the VSID half of an SLB entry selected by RB;
 * raises a program interrupt on an invalid index and returns 0. */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}
2543 #endif /* defined(TARGET_PPC64) */
2545 /* TLB management */
/* tlbia instruction helper: invalidate every translation. */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}
/* tlbie instruction helper: invalidate the translation for ADDR. */
void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
2556 /* Software driven TLBs management */
2557 /* PowerPC 602/603 software TLB load instructions helpers */
/* Common body of the 602/603 tlbld/tlbli software TLB load helpers.
 * Reads PTE0/PTE1 from RPA plus ICMP/IMISS (code) or DCMP/DMISS (data)
 * and stores the entry into the way selected by SRR1[way]. */
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
/* tlbld: load a data TLB entry on 602/603-class CPUs. */
void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}
/* tlbli: load an instruction TLB entry on 602/603-class CPUs. */
void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}
2591 /* PowerPC 74xx software TLB load instructions helpers */
/* Common body of the 74xx software TLB load helpers.  PTE comes from
 * PTELO/PTEHI; the faulting EPN and the target way are packed into
 * SPR_TLBMISS (low two bits = way). */
static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
    (void)EPN; /* avoid a compiler warning */
    LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
              " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
              RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}
/* tlbld: load a data TLB entry on 74xx-class CPUs. */
void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 0);
}
/* tlbli: load an instruction TLB entry on 74xx-class CPUs. */
void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_74xx_tlb(env, EPN, 1);
}
2620 /*****************************************************************************/
2621 /* PowerPC 601 specific instructions (POWER bridge) */
/* POWER bridge "rac" (real address compute): translate ADDR ignoring
 * BATs (temporarily pretending nb_BATs == 0) and return the real
 * address, or 0 if translation fails. */
target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
        ret = ctx.raddr;
    }
    env->nb_BATs = nb_BATs;   /* restore BAT count */
    return ret;
}
2642 static inline target_ulong
booke_tlb_to_page_size(int size
)
2644 return 1024 << (2 * size
);
2647 static inline int booke_page_size_to_tlb(target_ulong page_size
)
2651 switch (page_size
) {
2685 #if defined(TARGET_PPC64)
2686 case 0x000100000000ULL
:
2689 case 0x000400000000ULL
:
2692 case 0x001000000000ULL
:
2695 case 0x004000000000ULL
:
2698 case 0x010000000000ULL
:
2710 /* Helpers for 4xx TLB management */
2711 #define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
2713 #define PPC4XX_TLBHI_V 0x00000040
2714 #define PPC4XX_TLBHI_E 0x00000020
2715 #define PPC4XX_TLBHI_SIZE_MIN 0
2716 #define PPC4XX_TLBHI_SIZE_MAX 7
2717 #define PPC4XX_TLBHI_SIZE_DEFAULT 1
2718 #define PPC4XX_TLBHI_SIZE_SHIFT 7
2719 #define PPC4XX_TLBHI_SIZE_MASK 0x00000007
2721 #define PPC4XX_TLBLO_EX 0x00000200
2722 #define PPC4XX_TLBLO_WR 0x00000100
2723 #define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
2724 #define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
/* tlbre (high word) on 4xx: rebuild the TLBHI image of ENTRY from the
 * software TLB state (EPN, valid bit, encoded size) and load the
 * entry's PID into SPR_40x_PID as a side effect, per the 4xx spec. */
target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        /* Unencodable size: report the architected default instead. */
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
/* tlbre (low word) on 4xx: rebuild the TLBLO image of ENTRY from the
 * software TLB state (RPN plus EX/WR permission bits). */
target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}
/* tlbwe (high word) on 4xx: update ENTRY's EPN/size/valid state from
 * VAL.  Flushes the pages covered by the previous mapping before the
 * update and by the new mapping after it, and tags the entry with the
 * current SPR_40x_PID. */
void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

    LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
              val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);   /* align EPN to the page size */
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(env,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
              " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
              (int)entry, tlb->RPN, tlb->EPN, tlb->size,
              tlb->prot & PAGE_READ ? 'r' : '-',
              tlb->prot & PAGE_WRITE ? 'w' : '-',
              tlb->prot & PAGE_EXEC ? 'x' : '-',
              tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
        LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
                  TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
            tlb_flush_page(env, page);
        }
    }
}
2824 void helper_4xx_tlbwe_lo(CPUPPCState
*env
, target_ulong entry
,
2829 LOG_SWTLB("%s entry %i val " TARGET_FMT_lx
"\n", __func__
, (int)entry
,
2831 entry
&= PPC4XX_TLB_ENTRY_MASK
;
2832 tlb
= &env
->tlb
.tlbe
[entry
];
2833 tlb
->attr
= val
& PPC4XX_TLBLO_ATTR_MASK
;
2834 tlb
->RPN
= val
& PPC4XX_TLBLO_RPN_MASK
;
2835 tlb
->prot
= PAGE_READ
;
2836 if (val
& PPC4XX_TLBLO_EX
) {
2837 tlb
->prot
|= PAGE_EXEC
;
2839 if (val
& PPC4XX_TLBLO_WR
) {
2840 tlb
->prot
|= PAGE_WRITE
;
2842 LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx
" EPN " TARGET_FMT_lx
2843 " size " TARGET_FMT_lx
" prot %c%c%c%c PID %d\n", __func__
,
2844 (int)entry
, tlb
->RPN
, tlb
->EPN
, tlb
->size
,
2845 tlb
->prot
& PAGE_READ
? 'r' : '-',
2846 tlb
->prot
& PAGE_WRITE
? 'w' : '-',
2847 tlb
->prot
& PAGE_EXEC
? 'x' : '-',
2848 tlb
->prot
& PAGE_VALID
? 'v' : '-', (int)tlb
->PID
);
2851 target_ulong
helper_4xx_tlbsx(CPUPPCState
*env
, target_ulong address
)
2853 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_40x_PID
]);
/* PowerPC 440 TLB management */
2857 void helper_440_tlbwe(CPUPPCState
*env
, uint32_t word
, target_ulong entry
,
2861 target_ulong EPN
, RPN
, size
;
2864 LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx
"\n",
2865 __func__
, word
, (int)entry
, value
);
2868 tlb
= &env
->tlb
.tlbe
[entry
];
2871 /* Just here to please gcc */
2873 EPN
= value
& 0xFFFFFC00;
2874 if ((tlb
->prot
& PAGE_VALID
) && EPN
!= tlb
->EPN
) {
2878 size
= booke_tlb_to_page_size((value
>> 4) & 0xF);
2879 if ((tlb
->prot
& PAGE_VALID
) && tlb
->size
< size
) {
2884 tlb
->attr
|= (value
>> 8) & 1;
2885 if (value
& 0x200) {
2886 tlb
->prot
|= PAGE_VALID
;
2888 if (tlb
->prot
& PAGE_VALID
) {
2889 tlb
->prot
&= ~PAGE_VALID
;
2893 tlb
->PID
= env
->spr
[SPR_440_MMUCR
] & 0x000000FF;
2894 if (do_flush_tlbs
) {
2899 RPN
= value
& 0xFFFFFC0F;
2900 if ((tlb
->prot
& PAGE_VALID
) && tlb
->RPN
!= RPN
) {
2906 tlb
->attr
= (tlb
->attr
& 0x1) | (value
& 0x0000FF00);
2907 tlb
->prot
= tlb
->prot
& PAGE_VALID
;
2909 tlb
->prot
|= PAGE_READ
<< 4;
2912 tlb
->prot
|= PAGE_WRITE
<< 4;
2915 tlb
->prot
|= PAGE_EXEC
<< 4;
2918 tlb
->prot
|= PAGE_READ
;
2921 tlb
->prot
|= PAGE_WRITE
;
2924 tlb
->prot
|= PAGE_EXEC
;
2930 target_ulong
helper_440_tlbre(CPUPPCState
*env
, uint32_t word
,
2938 tlb
= &env
->tlb
.tlbe
[entry
];
2941 /* Just here to please gcc */
2944 size
= booke_page_size_to_tlb(tlb
->size
);
2945 if (size
< 0 || size
> 0xF) {
2949 if (tlb
->attr
& 0x1) {
2952 if (tlb
->prot
& PAGE_VALID
) {
2955 env
->spr
[SPR_440_MMUCR
] &= ~0x000000FF;
2956 env
->spr
[SPR_440_MMUCR
] |= tlb
->PID
;
2962 ret
= tlb
->attr
& ~0x1;
2963 if (tlb
->prot
& (PAGE_READ
<< 4)) {
2966 if (tlb
->prot
& (PAGE_WRITE
<< 4)) {
2969 if (tlb
->prot
& (PAGE_EXEC
<< 4)) {
2972 if (tlb
->prot
& PAGE_READ
) {
2975 if (tlb
->prot
& PAGE_WRITE
) {
2978 if (tlb
->prot
& PAGE_EXEC
) {
2986 target_ulong
helper_440_tlbsx(CPUPPCState
*env
, target_ulong address
)
2988 return ppcemb_tlb_search(env
, address
, env
->spr
[SPR_440_MMUCR
] & 0xFF);
/* PowerPC BookE 2.06 TLB management */
2993 static ppcmas_tlb_t
*booke206_cur_tlb(CPUPPCState
*env
)
2995 uint32_t tlbncfg
= 0;
2996 int esel
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ESEL_MASK
) >> MAS0_ESEL_SHIFT
;
2997 int ea
= (env
->spr
[SPR_BOOKE_MAS2
] & MAS2_EPN_MASK
);
3000 tlb
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
3001 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlb
];
3003 if ((tlbncfg
& TLBnCFG_HES
) && (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_HES
)) {
3004 cpu_abort(env
, "we don't support HES yet\n");
3007 return booke206_get_tlbm(env
, tlb
, ea
, esel
);
3010 void helper_booke_setpid(CPUPPCState
*env
, uint32_t pidn
, target_ulong pid
)
3012 env
->spr
[pidn
] = pid
;
3013 /* changing PIDs mean we're in a different address space now */
3017 void helper_booke206_tlbwe(CPUPPCState
*env
)
3019 uint32_t tlbncfg
, tlbn
;
3021 uint32_t size_tlb
, size_ps
;
3025 switch (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_WQ_MASK
) {
3026 case MAS0_WQ_ALWAYS
:
3027 /* good to go, write that entry */
3030 /* XXX check if reserved */
3035 case MAS0_WQ_CLR_RSRV
:
3036 /* XXX clear entry */
3039 /* no idea what to do */
3043 if (((env
->spr
[SPR_BOOKE_MAS0
] & MAS0_ATSEL
) == MAS0_ATSEL_LRAT
) &&
3045 /* XXX we don't support direct LRAT setting yet */
3046 fprintf(stderr
, "cpu: don't support LRAT setting yet\n");
3050 tlbn
= (env
->spr
[SPR_BOOKE_MAS0
] & MAS0_TLBSEL_MASK
) >> MAS0_TLBSEL_SHIFT
;
3051 tlbncfg
= env
->spr
[SPR_BOOKE_TLB0CFG
+ tlbn
];
3053 tlb
= booke206_cur_tlb(env
);
3056 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
3057 POWERPC_EXCP_INVAL
|
3058 POWERPC_EXCP_INVAL_INVAL
);
3061 /* check that we support the targeted size */
3062 size_tlb
= (env
->spr
[SPR_BOOKE_MAS1
] & MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
3063 size_ps
= booke206_tlbnps(env
, tlbn
);
3064 if ((env
->spr
[SPR_BOOKE_MAS1
] & MAS1_VALID
) && (tlbncfg
& TLBnCFG_AVAIL
) &&
3065 !(size_ps
& (1 << size_tlb
))) {
3066 helper_raise_exception_err(env
, POWERPC_EXCP_PROGRAM
,
3067 POWERPC_EXCP_INVAL
|
3068 POWERPC_EXCP_INVAL_INVAL
);
3072 cpu_abort(env
, "missing HV implementation\n");
3074 tlb
->mas7_3
= ((uint64_t)env
->spr
[SPR_BOOKE_MAS7
] << 32) |
3075 env
->spr
[SPR_BOOKE_MAS3
];
3076 tlb
->mas1
= env
->spr
[SPR_BOOKE_MAS1
];
3079 if (!(tlbncfg
& TLBnCFG_AVAIL
)) {
3080 /* force !AVAIL TLB entries to correct page size */
3081 tlb
->mas1
&= ~MAS1_TSIZE_MASK
;
3082 /* XXX can be configured in MMUCSR0 */
3083 tlb
->mas1
|= (tlbncfg
& TLBnCFG_MINSIZE
) >> 12;
3086 /* Make a mask from TLB size to discard invalid bits in EPN field */
3087 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
3088 /* Add a mask for page attributes */
3089 mask
|= MAS2_ACM
| MAS2_VLE
| MAS2_W
| MAS2_I
| MAS2_M
| MAS2_G
| MAS2_E
;
3092 /* Executing a tlbwe instruction in 32-bit mode will set
3093 * bits 0:31 of the TLB EPN field to zero.
3098 tlb
->mas2
= env
->spr
[SPR_BOOKE_MAS2
] & mask
;
3100 if (!(tlbncfg
& TLBnCFG_IPROT
)) {
3101 /* no IPROT supported by TLB */
3102 tlb
->mas1
&= ~MAS1_IPROT
;
3105 if (booke206_tlb_to_page_size(env
, tlb
) == TARGET_PAGE_SIZE
) {
3106 tlb_flush_page(env
, tlb
->mas2
& MAS2_EPN_MASK
);
3112 static inline void booke206_tlb_to_mas(CPUPPCState
*env
, ppcmas_tlb_t
*tlb
)
3114 int tlbn
= booke206_tlbm_to_tlbn(env
, tlb
);
3115 int way
= booke206_tlbm_to_way(env
, tlb
);
3117 env
->spr
[SPR_BOOKE_MAS0
] = tlbn
<< MAS0_TLBSEL_SHIFT
;
3118 env
->spr
[SPR_BOOKE_MAS0
] |= way
<< MAS0_ESEL_SHIFT
;
3119 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3121 env
->spr
[SPR_BOOKE_MAS1
] = tlb
->mas1
;
3122 env
->spr
[SPR_BOOKE_MAS2
] = tlb
->mas2
;
3123 env
->spr
[SPR_BOOKE_MAS3
] = tlb
->mas7_3
;
3124 env
->spr
[SPR_BOOKE_MAS7
] = tlb
->mas7_3
>> 32;
3127 void helper_booke206_tlbre(CPUPPCState
*env
)
3129 ppcmas_tlb_t
*tlb
= NULL
;
3131 tlb
= booke206_cur_tlb(env
);
3133 env
->spr
[SPR_BOOKE_MAS1
] = 0;
3135 booke206_tlb_to_mas(env
, tlb
);
3139 void helper_booke206_tlbsx(CPUPPCState
*env
, target_ulong address
)
3141 ppcmas_tlb_t
*tlb
= NULL
;
3143 target_phys_addr_t raddr
;
3146 spid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID_MASK
) >> MAS6_SPID_SHIFT
;
3147 sas
= env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SAS
;
3149 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3150 int ways
= booke206_tlb_ways(env
, i
);
3152 for (j
= 0; j
< ways
; j
++) {
3153 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
3159 if (ppcmas_tlb_check(env
, tlb
, &raddr
, address
, spid
)) {
3163 if (sas
!= ((tlb
->mas1
& MAS1_TS
) >> MAS1_TS_SHIFT
)) {
3167 booke206_tlb_to_mas(env
, tlb
);
3172 /* no entry found, fill with defaults */
3173 env
->spr
[SPR_BOOKE_MAS0
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TLBSELD_MASK
;
3174 env
->spr
[SPR_BOOKE_MAS1
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_TSIZED_MASK
;
3175 env
->spr
[SPR_BOOKE_MAS2
] = env
->spr
[SPR_BOOKE_MAS4
] & MAS4_WIMGED_MASK
;
3176 env
->spr
[SPR_BOOKE_MAS3
] = 0;
3177 env
->spr
[SPR_BOOKE_MAS7
] = 0;
3179 if (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SAS
) {
3180 env
->spr
[SPR_BOOKE_MAS1
] |= MAS1_TS
;
3183 env
->spr
[SPR_BOOKE_MAS1
] |= (env
->spr
[SPR_BOOKE_MAS6
] >> 16)
3186 /* next victim logic */
3187 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_ESEL_SHIFT
;
3189 env
->last_way
&= booke206_tlb_ways(env
, 0) - 1;
3190 env
->spr
[SPR_BOOKE_MAS0
] |= env
->last_way
<< MAS0_NV_SHIFT
;
3193 static inline void booke206_invalidate_ea_tlb(CPUPPCState
*env
, int tlbn
,
3197 int ways
= booke206_tlb_ways(env
, tlbn
);
3200 for (i
= 0; i
< ways
; i
++) {
3201 ppcmas_tlb_t
*tlb
= booke206_get_tlbm(env
, tlbn
, ea
, i
);
3205 mask
= ~(booke206_tlb_to_page_size(env
, tlb
) - 1);
3206 if (((tlb
->mas2
& MAS2_EPN_MASK
) == (ea
& mask
)) &&
3207 !(tlb
->mas1
& MAS1_IPROT
)) {
3208 tlb
->mas1
&= ~MAS1_VALID
;
3213 void helper_booke206_tlbivax(CPUPPCState
*env
, target_ulong address
)
3215 if (address
& 0x4) {
3216 /* flush all entries */
3217 if (address
& 0x8) {
3218 /* flush all of TLB1 */
3219 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB1
, 1);
3221 /* flush all of TLB0 */
3222 booke206_flush_tlb(env
, BOOKE206_FLUSH_TLB0
, 0);
3227 if (address
& 0x8) {
3228 /* flush TLB1 entries */
3229 booke206_invalidate_ea_tlb(env
, 1, address
);
3232 /* flush TLB0 entries */
3233 booke206_invalidate_ea_tlb(env
, 0, address
);
3234 tlb_flush_page(env
, address
& MAS2_EPN_MASK
);
3238 void helper_booke206_tlbilx0(CPUPPCState
*env
, target_ulong address
)
3240 /* XXX missing LPID handling */
3241 booke206_flush_tlb(env
, -1, 1);
3244 void helper_booke206_tlbilx1(CPUPPCState
*env
, target_ulong address
)
3247 int tid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID
);
3248 ppcmas_tlb_t
*tlb
= env
->tlb
.tlbm
;
3251 /* XXX missing LPID handling */
3252 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3253 tlb_size
= booke206_tlb_size(env
, i
);
3254 for (j
= 0; j
< tlb_size
; j
++) {
3255 if (!(tlb
[j
].mas1
& MAS1_IPROT
) &&
3256 ((tlb
[j
].mas1
& MAS1_TID_MASK
) == tid
)) {
3257 tlb
[j
].mas1
&= ~MAS1_VALID
;
3260 tlb
+= booke206_tlb_size(env
, i
);
3265 void helper_booke206_tlbilx3(CPUPPCState
*env
, target_ulong address
)
3269 int tid
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SPID
);
3270 int pid
= tid
>> MAS6_SPID_SHIFT
;
3271 int sgs
= env
->spr
[SPR_BOOKE_MAS5
] & MAS5_SGS
;
3272 int ind
= (env
->spr
[SPR_BOOKE_MAS6
] & MAS6_SIND
) ? MAS1_IND
: 0;
3273 /* XXX check for unsupported isize and raise an invalid opcode then */
3274 int size
= env
->spr
[SPR_BOOKE_MAS6
] & MAS6_ISIZE_MASK
;
3275 /* XXX implement MAV2 handling */
3278 /* XXX missing LPID handling */
3279 /* flush by pid and ea */
3280 for (i
= 0; i
< BOOKE206_MAX_TLBN
; i
++) {
3281 int ways
= booke206_tlb_ways(env
, i
);
3283 for (j
= 0; j
< ways
; j
++) {
3284 tlb
= booke206_get_tlbm(env
, i
, address
, j
);
3288 if ((ppcmas_tlb_check(env
, tlb
, NULL
, address
, pid
) != 0) ||
3289 (tlb
->mas1
& MAS1_IPROT
) ||
3290 ((tlb
->mas1
& MAS1_IND
) != ind
) ||
3291 ((tlb
->mas8
& MAS8_TGS
) != sgs
)) {
3294 if (mav2
&& ((tlb
->mas1
& MAS1_TSIZE_MASK
) != size
)) {
3295 /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
3298 /* XXX e500mc doesn't match SAS, but other cores might */
3299 tlb
->mas1
&= ~MAS1_VALID
;
3305 void helper_booke206_tlbflush(CPUPPCState
*env
, uint32_t type
)
3310 flags
|= BOOKE206_FLUSH_TLB1
;
3314 flags
|= BOOKE206_FLUSH_TLB0
;
3317 booke206_flush_tlb(env
, flags
, 1);