/*
 * SPARC op helpers (target-sparc/op_helper.c, QEMU)
 *
 * NOTE(review): the original top of this file was replaced by web-scrape
 * residue ("]>", gitweb page title); restored as a plain header comment.
 */
2 #include "dyngen-exec.h"
5 #if !defined(CONFIG_USER_ONLY)
6 #include "softmmu_exec.h"
11 //#define DEBUG_UNALIGNED
12 //#define DEBUG_UNASSIGNED
14 //#define DEBUG_PSTATE
15 //#define DEBUG_CACHE_CONTROL
/* Debug printf helpers: each DPRINTF_* expands to a printf when the
 * corresponding DEBUG_* switch above is defined, and to a no-op otherwise.
 * The extraction of this file had lost the #ifdef/#else/#endif guards,
 * leaving conflicting duplicate #defines (a compile error); restored here.
 * Note DPRINTF_ASI intentionally has no empty fallback: every use site is
 * itself guarded by #ifdef DEBUG_ASI. */
#ifdef DEBUG_MMU
#define DPRINTF_MMU(fmt, ...)                                   \
    do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MMU(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_MXCC
#define DPRINTF_MXCC(fmt, ...)                                  \
    do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_MXCC(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#endif

#ifdef DEBUG_PSTATE
#define DPRINTF_PSTATE(fmt, ...)                                \
    do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_PSTATE(fmt, ...) do {} while (0)
#endif

#ifdef DEBUG_CACHE_CONTROL
#define DPRINTF_CACHE_CONTROL(fmt, ...)                         \
    do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
#endif

/* AM_CHECK: true when the CPU masks addresses to 32 bits (PSTATE.AM).
 * Under TARGET_ABI32 the answer is always "yes". */
#ifdef TARGET_SPARC64
#ifndef TARGET_ABI32
#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
#else
#define AM_CHECK(env1) (1)
#endif
#endif
58 #define DT0 (env->dt0)
59 #define DT1 (env->dt1)
60 #define QT0 (env->qt0)
61 #define QT1 (env->qt1)
63 /* Leon3 cache control */
65 /* Cache control: emulate the behavior of cache control registers but without
66 any effect on the emulated */
68 #define CACHE_STATE_MASK 0x3
69 #define CACHE_DISABLED 0x0
70 #define CACHE_FROZEN 0x1
71 #define CACHE_ENABLED 0x3
73 /* Cache Control register fields */
75 #define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */
76 #define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */
77 #define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */
78 #define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */
79 #define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */
80 #define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */
81 #define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
82 #define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
84 #if !defined(CONFIG_USER_ONLY)
85 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
86 int is_exec
, int is_asi
, int size
);
89 static void do_unassigned_access(target_ulong addr
, int is_write
, int is_exec
,
90 int is_asi
, int size
);
94 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
95 /* Calculates TSB pointer value for fault page size 8k or 64k */
/* Calculates TSB pointer value for fault page size 8k or 64k.
 *
 * tsb_register layout (UltraSPARC): bits 63..13 = TSB base,
 * bit 12 = split enable, bits 3..0 = TSB size (number of doublings
 * beyond 512 entries). tag_access_register: bits 63..13 = faulting VA,
 * bits 12..0 = context (discarded here).
 *
 * NOTE(review): the extraction of this file had dropped the page_size
 * parameter line, the va-shift statements and the tsb_split guard; the
 * function is reconstructed to match the visible fragments. Confirm
 * against upstream before relying on the restored lines. */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size  = tsb_register & 0xf;

    /* discard lower 13 bits which hold tag access context */
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    /* now reorder bits */
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    /* move va bits to correct position */
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    /* 16-byte TTE entries: clear the low 4 bits of the pointer */
    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
/* Calculates tag target register value by reordering bits
   in tag access register: the 13-bit context (bits 12..0) moves to
   bits 60..48 and the VA (bits 63..22) moves to bits 41..0. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}
142 static void replace_tlb_entry(SparcTLBEntry
*tlb
,
143 uint64_t tlb_tag
, uint64_t tlb_tte
,
146 target_ulong mask
, size
, va
, offset
;
148 /* flush page range if translation is valid */
149 if (TTE_IS_VALID(tlb
->tte
)) {
151 mask
= 0xffffffffffffe000ULL
;
152 mask
<<= 3 * ((tlb
->tte
>> 61) & 3);
155 va
= tlb
->tag
& mask
;
157 for (offset
= 0; offset
< size
; offset
+= TARGET_PAGE_SIZE
) {
158 tlb_flush_page(env1
, va
+ offset
);
166 static void demap_tlb(SparcTLBEntry
*tlb
, target_ulong demap_addr
,
167 const char *strmmu
, CPUState
*env1
)
173 int is_demap_context
= (demap_addr
>> 6) & 1;
176 switch ((demap_addr
>> 4) & 3) {
177 case 0: /* primary */
178 context
= env1
->dmmu
.mmu_primary_context
;
180 case 1: /* secondary */
181 context
= env1
->dmmu
.mmu_secondary_context
;
183 case 2: /* nucleus */
186 case 3: /* reserved */
191 for (i
= 0; i
< 64; i
++) {
192 if (TTE_IS_VALID(tlb
[i
].tte
)) {
194 if (is_demap_context
) {
195 /* will remove non-global entries matching context value */
196 if (TTE_IS_GLOBAL(tlb
[i
].tte
) ||
197 !tlb_compare_context(&tlb
[i
], context
)) {
202 will remove any entry matching VA */
203 mask
= 0xffffffffffffe000ULL
;
204 mask
<<= 3 * ((tlb
[i
].tte
>> 61) & 3);
206 if (!compare_masked(demap_addr
, tlb
[i
].tag
, mask
)) {
210 /* entry should be global or matching context value */
211 if (!TTE_IS_GLOBAL(tlb
[i
].tte
) &&
212 !tlb_compare_context(&tlb
[i
], context
)) {
217 replace_tlb_entry(&tlb
[i
], 0, 0, env1
);
219 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu
, i
);
220 dump_mmu(stdout
, fprintf
, env1
);
226 static void replace_tlb_1bit_lru(SparcTLBEntry
*tlb
,
227 uint64_t tlb_tag
, uint64_t tlb_tte
,
228 const char *strmmu
, CPUState
*env1
)
230 unsigned int i
, replace_used
;
232 /* Try replacing invalid entry */
233 for (i
= 0; i
< 64; i
++) {
234 if (!TTE_IS_VALID(tlb
[i
].tte
)) {
235 replace_tlb_entry(&tlb
[i
], tlb_tag
, tlb_tte
, env1
);
237 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu
, i
);
238 dump_mmu(stdout
, fprintf
, env1
);
244 /* All entries are valid, try replacing unlocked entry */
246 for (replace_used
= 0; replace_used
< 2; ++replace_used
) {
248 /* Used entries are not replaced on first pass */
250 for (i
= 0; i
< 64; i
++) {
251 if (!TTE_IS_LOCKED(tlb
[i
].tte
) && !TTE_IS_USED(tlb
[i
].tte
)) {
253 replace_tlb_entry(&tlb
[i
], tlb_tag
, tlb_tte
, env1
);
255 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
256 strmmu
, (replace_used
? "used" : "unused"), i
);
257 dump_mmu(stdout
, fprintf
, env1
);
263 /* Now reset used bit and search for unused entries again */
265 for (i
= 0; i
< 64; i
++) {
266 TTE_SET_UNUSED(tlb
[i
].tte
);
271 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu
);
278 static inline target_ulong
address_mask(CPUState
*env1
, target_ulong addr
)
280 #ifdef TARGET_SPARC64
281 if (AM_CHECK(env1
)) {
282 addr
&= 0xffffffffULL
;
288 /* returns true if access using this ASI is to have address translated by MMU
289 otherwise access is to raw physical address */
290 static inline int is_translating_asi(int asi
)
292 #ifdef TARGET_SPARC64
293 /* Ultrasparc IIi translating asi
294 - note this list is defined by cpu implementation
310 /* TODO: check sparc32 bits */
315 static inline target_ulong
asi_address_mask(CPUState
*env1
,
316 int asi
, target_ulong addr
)
318 if (is_translating_asi(asi
)) {
319 return address_mask(env
, addr
);
325 void helper_check_align(target_ulong addr
, uint32_t align
)
328 #ifdef DEBUG_UNALIGNED
329 printf("Unaligned access to 0x" TARGET_FMT_lx
" from 0x" TARGET_FMT_lx
330 "\n", addr
, env
->pc
);
332 helper_raise_exception(env
, TT_UNALIGNED
);
336 static uint32_t compute_all_flags(void)
338 return env
->psr
& PSR_ICC
;
341 static uint32_t compute_C_flags(void)
343 return env
->psr
& PSR_CARRY
;
346 static inline uint32_t get_NZ_icc(int32_t dst
)
352 } else if (dst
< 0) {
358 #ifdef TARGET_SPARC64
359 static uint32_t compute_all_flags_xcc(void)
361 return env
->xcc
& PSR_ICC
;
364 static uint32_t compute_C_flags_xcc(void)
366 return env
->xcc
& PSR_CARRY
;
369 static inline uint32_t get_NZ_xcc(target_long dst
)
375 } else if (dst
< 0) {
382 static inline uint32_t get_V_div_icc(target_ulong src2
)
392 static uint32_t compute_all_div(void)
396 ret
= get_NZ_icc(CC_DST
);
397 ret
|= get_V_div_icc(CC_SRC2
);
401 static uint32_t compute_C_div(void)
406 static inline uint32_t get_C_add_icc(uint32_t dst
, uint32_t src1
)
416 static inline uint32_t get_C_addx_icc(uint32_t dst
, uint32_t src1
,
421 if (((src1
& src2
) | (~dst
& (src1
| src2
))) & (1U << 31)) {
427 static inline uint32_t get_V_add_icc(uint32_t dst
, uint32_t src1
,
432 if (((src1
^ src2
^ -1) & (src1
^ dst
)) & (1U << 31)) {
438 #ifdef TARGET_SPARC64
439 static inline uint32_t get_C_add_xcc(target_ulong dst
, target_ulong src1
)
449 static inline uint32_t get_C_addx_xcc(target_ulong dst
, target_ulong src1
,
454 if (((src1
& src2
) | (~dst
& (src1
| src2
))) & (1ULL << 63)) {
460 static inline uint32_t get_V_add_xcc(target_ulong dst
, target_ulong src1
,
465 if (((src1
^ src2
^ -1) & (src1
^ dst
)) & (1ULL << 63)) {
471 static uint32_t compute_all_add_xcc(void)
475 ret
= get_NZ_xcc(CC_DST
);
476 ret
|= get_C_add_xcc(CC_DST
, CC_SRC
);
477 ret
|= get_V_add_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
481 static uint32_t compute_C_add_xcc(void)
483 return get_C_add_xcc(CC_DST
, CC_SRC
);
487 static uint32_t compute_all_add(void)
491 ret
= get_NZ_icc(CC_DST
);
492 ret
|= get_C_add_icc(CC_DST
, CC_SRC
);
493 ret
|= get_V_add_icc(CC_DST
, CC_SRC
, CC_SRC2
);
497 static uint32_t compute_C_add(void)
499 return get_C_add_icc(CC_DST
, CC_SRC
);
502 #ifdef TARGET_SPARC64
503 static uint32_t compute_all_addx_xcc(void)
507 ret
= get_NZ_xcc(CC_DST
);
508 ret
|= get_C_addx_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
509 ret
|= get_V_add_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
513 static uint32_t compute_C_addx_xcc(void)
517 ret
= get_C_addx_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
522 static uint32_t compute_all_addx(void)
526 ret
= get_NZ_icc(CC_DST
);
527 ret
|= get_C_addx_icc(CC_DST
, CC_SRC
, CC_SRC2
);
528 ret
|= get_V_add_icc(CC_DST
, CC_SRC
, CC_SRC2
);
532 static uint32_t compute_C_addx(void)
536 ret
= get_C_addx_icc(CC_DST
, CC_SRC
, CC_SRC2
);
540 static inline uint32_t get_V_tag_icc(target_ulong src1
, target_ulong src2
)
544 if ((src1
| src2
) & 0x3) {
550 static uint32_t compute_all_tadd(void)
554 ret
= get_NZ_icc(CC_DST
);
555 ret
|= get_C_add_icc(CC_DST
, CC_SRC
);
556 ret
|= get_V_add_icc(CC_DST
, CC_SRC
, CC_SRC2
);
557 ret
|= get_V_tag_icc(CC_SRC
, CC_SRC2
);
561 static uint32_t compute_all_taddtv(void)
565 ret
= get_NZ_icc(CC_DST
);
566 ret
|= get_C_add_icc(CC_DST
, CC_SRC
);
570 static inline uint32_t get_C_sub_icc(uint32_t src1
, uint32_t src2
)
580 static inline uint32_t get_C_subx_icc(uint32_t dst
, uint32_t src1
,
585 if (((~src1
& src2
) | (dst
& (~src1
| src2
))) & (1U << 31)) {
591 static inline uint32_t get_V_sub_icc(uint32_t dst
, uint32_t src1
,
596 if (((src1
^ src2
) & (src1
^ dst
)) & (1U << 31)) {
603 #ifdef TARGET_SPARC64
604 static inline uint32_t get_C_sub_xcc(target_ulong src1
, target_ulong src2
)
614 static inline uint32_t get_C_subx_xcc(target_ulong dst
, target_ulong src1
,
619 if (((~src1
& src2
) | (dst
& (~src1
| src2
))) & (1ULL << 63)) {
625 static inline uint32_t get_V_sub_xcc(target_ulong dst
, target_ulong src1
,
630 if (((src1
^ src2
) & (src1
^ dst
)) & (1ULL << 63)) {
636 static uint32_t compute_all_sub_xcc(void)
640 ret
= get_NZ_xcc(CC_DST
);
641 ret
|= get_C_sub_xcc(CC_SRC
, CC_SRC2
);
642 ret
|= get_V_sub_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
646 static uint32_t compute_C_sub_xcc(void)
648 return get_C_sub_xcc(CC_SRC
, CC_SRC2
);
652 static uint32_t compute_all_sub(void)
656 ret
= get_NZ_icc(CC_DST
);
657 ret
|= get_C_sub_icc(CC_SRC
, CC_SRC2
);
658 ret
|= get_V_sub_icc(CC_DST
, CC_SRC
, CC_SRC2
);
662 static uint32_t compute_C_sub(void)
664 return get_C_sub_icc(CC_SRC
, CC_SRC2
);
667 #ifdef TARGET_SPARC64
668 static uint32_t compute_all_subx_xcc(void)
672 ret
= get_NZ_xcc(CC_DST
);
673 ret
|= get_C_subx_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
674 ret
|= get_V_sub_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
678 static uint32_t compute_C_subx_xcc(void)
682 ret
= get_C_subx_xcc(CC_DST
, CC_SRC
, CC_SRC2
);
687 static uint32_t compute_all_subx(void)
691 ret
= get_NZ_icc(CC_DST
);
692 ret
|= get_C_subx_icc(CC_DST
, CC_SRC
, CC_SRC2
);
693 ret
|= get_V_sub_icc(CC_DST
, CC_SRC
, CC_SRC2
);
697 static uint32_t compute_C_subx(void)
701 ret
= get_C_subx_icc(CC_DST
, CC_SRC
, CC_SRC2
);
705 static uint32_t compute_all_tsub(void)
709 ret
= get_NZ_icc(CC_DST
);
710 ret
|= get_C_sub_icc(CC_SRC
, CC_SRC2
);
711 ret
|= get_V_sub_icc(CC_DST
, CC_SRC
, CC_SRC2
);
712 ret
|= get_V_tag_icc(CC_SRC
, CC_SRC2
);
716 static uint32_t compute_all_tsubtv(void)
720 ret
= get_NZ_icc(CC_DST
);
721 ret
|= get_C_sub_icc(CC_SRC
, CC_SRC2
);
725 static uint32_t compute_all_logic(void)
727 return get_NZ_icc(CC_DST
);
730 static uint32_t compute_C_logic(void)
735 #ifdef TARGET_SPARC64
736 static uint32_t compute_all_logic_xcc(void)
738 return get_NZ_xcc(CC_DST
);
742 typedef struct CCTable
{
743 uint32_t (*compute_all
)(void); /* return all the flags */
744 uint32_t (*compute_c
)(void); /* return the C flag */
747 static const CCTable icc_table
[CC_OP_NB
] = {
748 /* CC_OP_DYNAMIC should never happen */
749 [CC_OP_FLAGS
] = { compute_all_flags
, compute_C_flags
},
750 [CC_OP_DIV
] = { compute_all_div
, compute_C_div
},
751 [CC_OP_ADD
] = { compute_all_add
, compute_C_add
},
752 [CC_OP_ADDX
] = { compute_all_addx
, compute_C_addx
},
753 [CC_OP_TADD
] = { compute_all_tadd
, compute_C_add
},
754 [CC_OP_TADDTV
] = { compute_all_taddtv
, compute_C_add
},
755 [CC_OP_SUB
] = { compute_all_sub
, compute_C_sub
},
756 [CC_OP_SUBX
] = { compute_all_subx
, compute_C_subx
},
757 [CC_OP_TSUB
] = { compute_all_tsub
, compute_C_sub
},
758 [CC_OP_TSUBTV
] = { compute_all_tsubtv
, compute_C_sub
},
759 [CC_OP_LOGIC
] = { compute_all_logic
, compute_C_logic
},
762 #ifdef TARGET_SPARC64
763 static const CCTable xcc_table
[CC_OP_NB
] = {
764 /* CC_OP_DYNAMIC should never happen */
765 [CC_OP_FLAGS
] = { compute_all_flags_xcc
, compute_C_flags_xcc
},
766 [CC_OP_DIV
] = { compute_all_logic_xcc
, compute_C_logic
},
767 [CC_OP_ADD
] = { compute_all_add_xcc
, compute_C_add_xcc
},
768 [CC_OP_ADDX
] = { compute_all_addx_xcc
, compute_C_addx_xcc
},
769 [CC_OP_TADD
] = { compute_all_add_xcc
, compute_C_add_xcc
},
770 [CC_OP_TADDTV
] = { compute_all_add_xcc
, compute_C_add_xcc
},
771 [CC_OP_SUB
] = { compute_all_sub_xcc
, compute_C_sub_xcc
},
772 [CC_OP_SUBX
] = { compute_all_subx_xcc
, compute_C_subx_xcc
},
773 [CC_OP_TSUB
] = { compute_all_sub_xcc
, compute_C_sub_xcc
},
774 [CC_OP_TSUBTV
] = { compute_all_sub_xcc
, compute_C_sub_xcc
},
775 [CC_OP_LOGIC
] = { compute_all_logic_xcc
, compute_C_logic
},
779 void helper_compute_psr(void)
783 new_psr
= icc_table
[CC_OP
].compute_all();
785 #ifdef TARGET_SPARC64
786 new_psr
= xcc_table
[CC_OP
].compute_all();
792 uint32_t helper_compute_C_icc(void)
796 ret
= icc_table
[CC_OP
].compute_c() >> PSR_CARRY_SHIFT
;
800 static inline void memcpy32(target_ulong
*dst
, const target_ulong
*src
)
812 static void set_cwp(int new_cwp
)
814 /* put the modified wrap registers at their proper location */
815 if (env
->cwp
== env
->nwindows
- 1) {
816 memcpy32(env
->regbase
, env
->regbase
+ env
->nwindows
* 16);
820 /* put the wrap registers at their temporary location */
821 if (new_cwp
== env
->nwindows
- 1) {
822 memcpy32(env
->regbase
+ env
->nwindows
* 16, env
->regbase
);
824 env
->regwptr
= env
->regbase
+ (new_cwp
* 16);
827 void cpu_set_cwp(CPUState
*env1
, int new_cwp
)
837 static target_ulong
get_psr(void)
839 helper_compute_psr();
841 #if !defined (TARGET_SPARC64)
842 return env
->version
| (env
->psr
& PSR_ICC
) |
843 (env
->psref
? PSR_EF
: 0) |
845 (env
->psrs
? PSR_S
: 0) |
846 (env
->psrps
? PSR_PS
: 0) |
847 (env
->psret
? PSR_ET
: 0) | env
->cwp
;
849 return env
->psr
& PSR_ICC
;
853 target_ulong
cpu_get_psr(CPUState
*env1
)
865 static void put_psr(target_ulong val
)
867 env
->psr
= val
& PSR_ICC
;
868 #if !defined (TARGET_SPARC64)
869 env
->psref
= (val
& PSR_EF
) ? 1 : 0;
870 env
->psrpil
= (val
& PSR_PIL
) >> 8;
872 #if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
875 #if !defined (TARGET_SPARC64)
876 env
->psrs
= (val
& PSR_S
) ? 1 : 0;
877 env
->psrps
= (val
& PSR_PS
) ? 1 : 0;
878 env
->psret
= (val
& PSR_ET
) ? 1 : 0;
879 set_cwp(val
& PSR_CWP
);
881 env
->cc_op
= CC_OP_FLAGS
;
884 void cpu_put_psr(CPUState
*env1
, target_ulong val
)
894 static int cwp_inc(int cwp
)
896 if (unlikely(cwp
>= env
->nwindows
)) {
897 cwp
-= env
->nwindows
;
902 int cpu_cwp_inc(CPUState
*env1
, int cwp
)
914 static int cwp_dec(int cwp
)
916 if (unlikely(cwp
< 0)) {
917 cwp
+= env
->nwindows
;
922 int cpu_cwp_dec(CPUState
*env1
, int cwp
)
934 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
936 static void dump_mxcc(CPUState
*env
)
938 printf("mxccdata: %016" PRIx64
" %016" PRIx64
" %016" PRIx64
" %016" PRIx64
940 env
->mxccdata
[0], env
->mxccdata
[1],
941 env
->mxccdata
[2], env
->mxccdata
[3]);
942 printf("mxccregs: %016" PRIx64
" %016" PRIx64
" %016" PRIx64
" %016" PRIx64
944 " %016" PRIx64
" %016" PRIx64
" %016" PRIx64
" %016" PRIx64
946 env
->mxccregs
[0], env
->mxccregs
[1],
947 env
->mxccregs
[2], env
->mxccregs
[3],
948 env
->mxccregs
[4], env
->mxccregs
[5],
949 env
->mxccregs
[6], env
->mxccregs
[7]);
953 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
954 && defined(DEBUG_ASI)
955 static void dump_asi(const char *txt
, target_ulong addr
, int asi
, int size
,
960 DPRINTF_ASI("%s "TARGET_FMT_lx
" asi 0x%02x = %02" PRIx64
"\n", txt
,
961 addr
, asi
, r1
& 0xff);
964 DPRINTF_ASI("%s "TARGET_FMT_lx
" asi 0x%02x = %04" PRIx64
"\n", txt
,
965 addr
, asi
, r1
& 0xffff);
968 DPRINTF_ASI("%s "TARGET_FMT_lx
" asi 0x%02x = %08" PRIx64
"\n", txt
,
969 addr
, asi
, r1
& 0xffffffff);
972 DPRINTF_ASI("%s "TARGET_FMT_lx
" asi 0x%02x = %016" PRIx64
"\n", txt
,
979 #ifndef TARGET_SPARC64
980 #ifndef CONFIG_USER_ONLY
983 /* Leon3 cache control */
985 static void leon3_cache_control_int(void)
989 if (env
->cache_control
& CACHE_CTRL_IF
) {
990 /* Instruction cache state */
991 state
= env
->cache_control
& CACHE_STATE_MASK
;
992 if (state
== CACHE_ENABLED
) {
993 state
= CACHE_FROZEN
;
994 DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
997 env
->cache_control
&= ~CACHE_STATE_MASK
;
998 env
->cache_control
|= state
;
1001 if (env
->cache_control
& CACHE_CTRL_DF
) {
1002 /* Data cache state */
1003 state
= (env
->cache_control
>> 2) & CACHE_STATE_MASK
;
1004 if (state
== CACHE_ENABLED
) {
1005 state
= CACHE_FROZEN
;
1006 DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
1009 env
->cache_control
&= ~(CACHE_STATE_MASK
<< 2);
1010 env
->cache_control
|= (state
<< 2);
1014 static void leon3_cache_control_st(target_ulong addr
, uint64_t val
, int size
)
1016 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64
", size:%d\n",
1020 DPRINTF_CACHE_CONTROL("32bits only\n");
1025 case 0x00: /* Cache control */
1027 /* These values must always be read as zeros */
1028 val
&= ~CACHE_CTRL_FD
;
1029 val
&= ~CACHE_CTRL_FI
;
1030 val
&= ~CACHE_CTRL_IB
;
1031 val
&= ~CACHE_CTRL_IP
;
1032 val
&= ~CACHE_CTRL_DP
;
1034 env
->cache_control
= val
;
1036 case 0x04: /* Instruction cache configuration */
1037 case 0x08: /* Data cache configuration */
1041 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr
);
1046 static uint64_t leon3_cache_control_ld(target_ulong addr
, int size
)
1051 DPRINTF_CACHE_CONTROL("32bits only\n");
1056 case 0x00: /* Cache control */
1057 ret
= env
->cache_control
;
1060 /* Configuration registers are read and only always keep those
1061 predefined values */
1063 case 0x04: /* Instruction cache configuration */
1066 case 0x08: /* Data cache configuration */
1070 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr
);
1073 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64
", size:%d\n",
1078 void leon3_irq_manager(void *irq_manager
, int intno
)
1080 leon3_irq_ack(irq_manager
, intno
);
1081 leon3_cache_control_int();
1084 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1087 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
1088 uint32_t last_addr
= addr
;
1091 helper_check_align(addr
, size
- 1);
1093 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
1095 case 0x00: /* Leon3 Cache Control */
1096 case 0x08: /* Leon3 Instruction Cache config */
1097 case 0x0C: /* Leon3 Date Cache config */
1098 if (env
->def
->features
& CPU_FEATURE_CACHE_CTRL
) {
1099 ret
= leon3_cache_control_ld(addr
, size
);
1102 case 0x01c00a00: /* MXCC control register */
1104 ret
= env
->mxccregs
[3];
1106 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1110 case 0x01c00a04: /* MXCC control register */
1112 ret
= env
->mxccregs
[3];
1114 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1118 case 0x01c00c00: /* Module reset register */
1120 ret
= env
->mxccregs
[5];
1121 /* should we do something here? */
1123 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1127 case 0x01c00f00: /* MBus port address register */
1129 ret
= env
->mxccregs
[7];
1131 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1136 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr
,
1140 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
1141 "addr = %08x -> ret = %" PRIx64
","
1142 "addr = %08x\n", asi
, size
, sign
, last_addr
, ret
, addr
);
1147 case 3: /* MMU probe */
1151 mmulev
= (addr
>> 8) & 15;
1155 ret
= mmu_probe(env
, addr
, mmulev
);
1157 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64
"\n",
1161 case 4: /* read MMU regs */
1163 int reg
= (addr
>> 8) & 0x1f;
1165 ret
= env
->mmuregs
[reg
];
1166 if (reg
== 3) { /* Fault status cleared on read */
1167 env
->mmuregs
[3] = 0;
1168 } else if (reg
== 0x13) { /* Fault status read */
1169 ret
= env
->mmuregs
[3];
1170 } else if (reg
== 0x14) { /* Fault address read */
1171 ret
= env
->mmuregs
[4];
1173 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64
"\n", reg
, ret
);
1176 case 5: /* Turbosparc ITLB Diagnostic */
1177 case 6: /* Turbosparc DTLB Diagnostic */
1178 case 7: /* Turbosparc IOTLB Diagnostic */
1180 case 9: /* Supervisor code access */
1183 ret
= ldub_code(addr
);
1186 ret
= lduw_code(addr
);
1190 ret
= ldl_code(addr
);
1193 ret
= ldq_code(addr
);
1197 case 0xa: /* User data access */
1200 ret
= ldub_user(addr
);
1203 ret
= lduw_user(addr
);
1207 ret
= ldl_user(addr
);
1210 ret
= ldq_user(addr
);
1214 case 0xb: /* Supervisor data access */
1217 ret
= ldub_kernel(addr
);
1220 ret
= lduw_kernel(addr
);
1224 ret
= ldl_kernel(addr
);
1227 ret
= ldq_kernel(addr
);
1231 case 0xc: /* I-cache tag */
1232 case 0xd: /* I-cache data */
1233 case 0xe: /* D-cache tag */
1234 case 0xf: /* D-cache data */
1236 case 0x20: /* MMU passthrough */
1239 ret
= ldub_phys(addr
);
1242 ret
= lduw_phys(addr
);
1246 ret
= ldl_phys(addr
);
1249 ret
= ldq_phys(addr
);
1253 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1256 ret
= ldub_phys((target_phys_addr_t
)addr
1257 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
1260 ret
= lduw_phys((target_phys_addr_t
)addr
1261 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
1265 ret
= ldl_phys((target_phys_addr_t
)addr
1266 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
1269 ret
= ldq_phys((target_phys_addr_t
)addr
1270 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
1274 case 0x30: /* Turbosparc secondary cache diagnostic */
1275 case 0x31: /* Turbosparc RAM snoop */
1276 case 0x32: /* Turbosparc page table descriptor diagnostic */
1277 case 0x39: /* data cache diagnostic register */
1280 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
1282 int reg
= (addr
>> 8) & 3;
1285 case 0: /* Breakpoint Value (Addr) */
1286 ret
= env
->mmubpregs
[reg
];
1288 case 1: /* Breakpoint Mask */
1289 ret
= env
->mmubpregs
[reg
];
1291 case 2: /* Breakpoint Control */
1292 ret
= env
->mmubpregs
[reg
];
1294 case 3: /* Breakpoint Status */
1295 ret
= env
->mmubpregs
[reg
];
1296 env
->mmubpregs
[reg
] = 0ULL;
1299 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64
"\n", reg
,
1303 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1304 ret
= env
->mmubpctrv
;
1306 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1307 ret
= env
->mmubpctrc
;
1309 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1310 ret
= env
->mmubpctrs
;
1312 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1313 ret
= env
->mmubpaction
;
1315 case 8: /* User code access, XXX */
1317 do_unassigned_access(addr
, 0, 0, asi
, size
);
1327 ret
= (int16_t) ret
;
1330 ret
= (int32_t) ret
;
1337 dump_asi("read ", last_addr
, asi
, size
, ret
);
1342 void helper_st_asi(target_ulong addr
, uint64_t val
, int asi
, int size
)
1344 helper_check_align(addr
, size
- 1);
1346 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
1348 case 0x00: /* Leon3 Cache Control */
1349 case 0x08: /* Leon3 Instruction Cache config */
1350 case 0x0C: /* Leon3 Date Cache config */
1351 if (env
->def
->features
& CPU_FEATURE_CACHE_CTRL
) {
1352 leon3_cache_control_st(addr
, val
, size
);
1356 case 0x01c00000: /* MXCC stream data register 0 */
1358 env
->mxccdata
[0] = val
;
1360 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1364 case 0x01c00008: /* MXCC stream data register 1 */
1366 env
->mxccdata
[1] = val
;
1368 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1372 case 0x01c00010: /* MXCC stream data register 2 */
1374 env
->mxccdata
[2] = val
;
1376 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1380 case 0x01c00018: /* MXCC stream data register 3 */
1382 env
->mxccdata
[3] = val
;
1384 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1388 case 0x01c00100: /* MXCC stream source */
1390 env
->mxccregs
[0] = val
;
1392 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1395 env
->mxccdata
[0] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1397 env
->mxccdata
[1] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1399 env
->mxccdata
[2] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1401 env
->mxccdata
[3] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
1404 case 0x01c00200: /* MXCC stream destination */
1406 env
->mxccregs
[1] = val
;
1408 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1411 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 0,
1413 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 8,
1415 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 16,
1417 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 24,
1420 case 0x01c00a00: /* MXCC control register */
1422 env
->mxccregs
[3] = val
;
1424 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1428 case 0x01c00a04: /* MXCC control register */
1430 env
->mxccregs
[3] = (env
->mxccregs
[3] & 0xffffffff00000000ULL
)
1433 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1437 case 0x01c00e00: /* MXCC error register */
1438 /* writing a 1 bit clears the error */
1440 env
->mxccregs
[6] &= ~val
;
1442 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1446 case 0x01c00f00: /* MBus port address register */
1448 env
->mxccregs
[7] = val
;
1450 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
1455 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr
,
1459 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64
"\n",
1460 asi
, size
, addr
, val
);
1465 case 3: /* MMU flush */
1469 mmulev
= (addr
>> 8) & 15;
1470 DPRINTF_MMU("mmu flush level %d\n", mmulev
);
1472 case 0: /* flush page */
1473 tlb_flush_page(env
, addr
& 0xfffff000);
1475 case 1: /* flush segment (256k) */
1476 case 2: /* flush region (16M) */
1477 case 3: /* flush context (4G) */
1478 case 4: /* flush entire */
1485 dump_mmu(stdout
, fprintf
, env
);
1489 case 4: /* write MMU regs */
1491 int reg
= (addr
>> 8) & 0x1f;
1494 oldreg
= env
->mmuregs
[reg
];
1496 case 0: /* Control Register */
1497 env
->mmuregs
[reg
] = (env
->mmuregs
[reg
] & 0xff000000) |
1499 /* Mappings generated during no-fault mode or MMU
1500 disabled mode are invalid in normal mode */
1501 if ((oldreg
& (MMU_E
| MMU_NF
| env
->def
->mmu_bm
)) !=
1502 (env
->mmuregs
[reg
] & (MMU_E
| MMU_NF
| env
->def
->mmu_bm
))) {
1506 case 1: /* Context Table Pointer Register */
1507 env
->mmuregs
[reg
] = val
& env
->def
->mmu_ctpr_mask
;
1509 case 2: /* Context Register */
1510 env
->mmuregs
[reg
] = val
& env
->def
->mmu_cxr_mask
;
1511 if (oldreg
!= env
->mmuregs
[reg
]) {
1512 /* we flush when the MMU context changes because
1513 QEMU has no MMU context support */
1517 case 3: /* Synchronous Fault Status Register with Clear */
1518 case 4: /* Synchronous Fault Address Register */
1520 case 0x10: /* TLB Replacement Control Register */
1521 env
->mmuregs
[reg
] = val
& env
->def
->mmu_trcr_mask
;
1523 case 0x13: /* Synchronous Fault Status Register with Read
1525 env
->mmuregs
[3] = val
& env
->def
->mmu_sfsr_mask
;
1527 case 0x14: /* Synchronous Fault Address Register */
1528 env
->mmuregs
[4] = val
;
1531 env
->mmuregs
[reg
] = val
;
1534 if (oldreg
!= env
->mmuregs
[reg
]) {
1535 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1536 reg
, oldreg
, env
->mmuregs
[reg
]);
1539 dump_mmu(stdout
, fprintf
, env
);
1543 case 5: /* Turbosparc ITLB Diagnostic */
1544 case 6: /* Turbosparc DTLB Diagnostic */
1545 case 7: /* Turbosparc IOTLB Diagnostic */
1547 case 0xa: /* User data access */
1550 stb_user(addr
, val
);
1553 stw_user(addr
, val
);
1557 stl_user(addr
, val
);
1560 stq_user(addr
, val
);
1564 case 0xb: /* Supervisor data access */
1567 stb_kernel(addr
, val
);
1570 stw_kernel(addr
, val
);
1574 stl_kernel(addr
, val
);
1577 stq_kernel(addr
, val
);
1581 case 0xc: /* I-cache tag */
1582 case 0xd: /* I-cache data */
1583 case 0xe: /* D-cache tag */
1584 case 0xf: /* D-cache data */
1585 case 0x10: /* I/D-cache flush page */
1586 case 0x11: /* I/D-cache flush segment */
1587 case 0x12: /* I/D-cache flush region */
1588 case 0x13: /* I/D-cache flush context */
1589 case 0x14: /* I/D-cache flush user */
1591 case 0x17: /* Block copy, sta access */
1597 uint32_t src
= val
& ~3, dst
= addr
& ~3, temp
;
1599 for (i
= 0; i
< 32; i
+= 4, src
+= 4, dst
+= 4) {
1600 temp
= ldl_kernel(src
);
1601 stl_kernel(dst
, temp
);
1605 case 0x1f: /* Block fill, stda access */
1608 fill 32 bytes with val */
1610 uint32_t dst
= addr
& 7;
1612 for (i
= 0; i
< 32; i
+= 8, dst
+= 8) {
1613 stq_kernel(dst
, val
);
1617 case 0x20: /* MMU passthrough */
1621 stb_phys(addr
, val
);
1624 stw_phys(addr
, val
);
1628 stl_phys(addr
, val
);
1631 stq_phys(addr
, val
);
1636 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1640 stb_phys((target_phys_addr_t
)addr
1641 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1644 stw_phys((target_phys_addr_t
)addr
1645 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1649 stl_phys((target_phys_addr_t
)addr
1650 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1653 stq_phys((target_phys_addr_t
)addr
1654 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
1659 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
1660 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
1661 Turbosparc snoop RAM */
1662 case 0x32: /* store buffer control or Turbosparc page table
1663 descriptor diagnostic */
1664 case 0x36: /* I-cache flash clear */
1665 case 0x37: /* D-cache flash clear */
1667 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1669 int reg
= (addr
>> 8) & 3;
1672 case 0: /* Breakpoint Value (Addr) */
1673 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1675 case 1: /* Breakpoint Mask */
1676 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1678 case 2: /* Breakpoint Control */
1679 env
->mmubpregs
[reg
] = (val
& 0x7fULL
);
1681 case 3: /* Breakpoint Status */
1682 env
->mmubpregs
[reg
] = (val
& 0xfULL
);
1685 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg
,
1689 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1690 env
->mmubpctrv
= val
& 0xffffffff;
1692 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1693 env
->mmubpctrc
= val
& 0x3;
1695 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1696 env
->mmubpctrs
= val
& 0x3;
1698 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1699 env
->mmubpaction
= val
& 0x1fff;
1701 case 8: /* User code access, XXX */
1702 case 9: /* Supervisor code access, XXX */
1704 do_unassigned_access(addr
, 1, 0, asi
, size
);
1708 dump_asi("write", addr
, asi
, size
, val
);
1712 #endif /* CONFIG_USER_ONLY */
1713 #else /* TARGET_SPARC64 */
1715 #ifdef CONFIG_USER_ONLY
1716 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1719 #if defined(DEBUG_ASI)
1720 target_ulong last_addr
= addr
;
1724 helper_raise_exception(env
, TT_PRIV_ACT
);
1727 helper_check_align(addr
, size
- 1);
1728 addr
= asi_address_mask(env
, asi
, addr
);
1731 case 0x82: /* Primary no-fault */
1732 case 0x8a: /* Primary no-fault LE */
1733 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1735 dump_asi("read ", last_addr
, asi
, size
, ret
);
1740 case 0x80: /* Primary */
1741 case 0x88: /* Primary LE */
1745 ret
= ldub_raw(addr
);
1748 ret
= lduw_raw(addr
);
1751 ret
= ldl_raw(addr
);
1755 ret
= ldq_raw(addr
);
1760 case 0x83: /* Secondary no-fault */
1761 case 0x8b: /* Secondary no-fault LE */
1762 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1764 dump_asi("read ", last_addr
, asi
, size
, ret
);
1769 case 0x81: /* Secondary */
1770 case 0x89: /* Secondary LE */
1777 /* Convert from little endian */
1779 case 0x88: /* Primary LE */
1780 case 0x89: /* Secondary LE */
1781 case 0x8a: /* Primary no-fault LE */
1782 case 0x8b: /* Secondary no-fault LE */
1800 /* Convert to signed number */
1807 ret
= (int16_t) ret
;
1810 ret
= (int32_t) ret
;
1817 dump_asi("read ", last_addr
, asi
, size
, ret
);
1822 void helper_st_asi(target_ulong addr
, target_ulong val
, int asi
, int size
)
1825 dump_asi("write", addr
, asi
, size
, val
);
1828 helper_raise_exception(env
, TT_PRIV_ACT
);
1831 helper_check_align(addr
, size
- 1);
1832 addr
= asi_address_mask(env
, asi
, addr
);
1834 /* Convert to little endian */
1836 case 0x88: /* Primary LE */
1837 case 0x89: /* Secondary LE */
1856 case 0x80: /* Primary */
1857 case 0x88: /* Primary LE */
1876 case 0x81: /* Secondary */
1877 case 0x89: /* Secondary LE */
1881 case 0x82: /* Primary no-fault, RO */
1882 case 0x83: /* Secondary no-fault, RO */
1883 case 0x8a: /* Primary no-fault LE, RO */
1884 case 0x8b: /* Secondary no-fault LE, RO */
1886 do_unassigned_access(addr
, 1, 0, 1, size
);
1891 #else /* CONFIG_USER_ONLY */
1893 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1896 #if defined(DEBUG_ASI)
1897 target_ulong last_addr
= addr
;
1902 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
1903 || (cpu_has_hypervisor(env
)
1904 && asi
>= 0x30 && asi
< 0x80
1905 && !(env
->hpstate
& HS_PRIV
))) {
1906 helper_raise_exception(env
, TT_PRIV_ACT
);
1909 helper_check_align(addr
, size
- 1);
1910 addr
= asi_address_mask(env
, asi
, addr
);
1912 /* process nonfaulting loads first */
1913 if ((asi
& 0xf6) == 0x82) {
1916 /* secondary space access has lowest asi bit equal to 1 */
1917 if (env
->pstate
& PS_PRIV
) {
1918 mmu_idx
= (asi
& 1) ? MMU_KERNEL_SECONDARY_IDX
: MMU_KERNEL_IDX
;
1920 mmu_idx
= (asi
& 1) ? MMU_USER_SECONDARY_IDX
: MMU_USER_IDX
;
1923 if (cpu_get_phys_page_nofault(env
, addr
, mmu_idx
) == -1ULL) {
1925 dump_asi("read ", last_addr
, asi
, size
, ret
);
1927 /* env->exception_index is set in get_physical_address_data(). */
1928 helper_raise_exception(env
, env
->exception_index
);
1931 /* convert nonfaulting load ASIs to normal load ASIs */
1936 case 0x10: /* As if user primary */
1937 case 0x11: /* As if user secondary */
1938 case 0x18: /* As if user primary LE */
1939 case 0x19: /* As if user secondary LE */
1940 case 0x80: /* Primary */
1941 case 0x81: /* Secondary */
1942 case 0x88: /* Primary LE */
1943 case 0x89: /* Secondary LE */
1944 case 0xe2: /* UA2007 Primary block init */
1945 case 0xe3: /* UA2007 Secondary block init */
1946 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
1947 if (cpu_hypervisor_mode(env
)) {
1950 ret
= ldub_hypv(addr
);
1953 ret
= lduw_hypv(addr
);
1956 ret
= ldl_hypv(addr
);
1960 ret
= ldq_hypv(addr
);
1964 /* secondary space access has lowest asi bit equal to 1 */
1968 ret
= ldub_kernel_secondary(addr
);
1971 ret
= lduw_kernel_secondary(addr
);
1974 ret
= ldl_kernel_secondary(addr
);
1978 ret
= ldq_kernel_secondary(addr
);
1984 ret
= ldub_kernel(addr
);
1987 ret
= lduw_kernel(addr
);
1990 ret
= ldl_kernel(addr
);
1994 ret
= ldq_kernel(addr
);
2000 /* secondary space access has lowest asi bit equal to 1 */
2004 ret
= ldub_user_secondary(addr
);
2007 ret
= lduw_user_secondary(addr
);
2010 ret
= ldl_user_secondary(addr
);
2014 ret
= ldq_user_secondary(addr
);
2020 ret
= ldub_user(addr
);
2023 ret
= lduw_user(addr
);
2026 ret
= ldl_user(addr
);
2030 ret
= ldq_user(addr
);
2036 case 0x14: /* Bypass */
2037 case 0x15: /* Bypass, non-cacheable */
2038 case 0x1c: /* Bypass LE */
2039 case 0x1d: /* Bypass, non-cacheable LE */
2043 ret
= ldub_phys(addr
);
2046 ret
= lduw_phys(addr
);
2049 ret
= ldl_phys(addr
);
2053 ret
= ldq_phys(addr
);
2058 case 0x24: /* Nucleus quad LDD 128 bit atomic */
2059 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
2060 Only ldda allowed */
2061 helper_raise_exception(env
, TT_ILL_INSN
);
2063 case 0x04: /* Nucleus */
2064 case 0x0c: /* Nucleus Little Endian (LE) */
2068 ret
= ldub_nucleus(addr
);
2071 ret
= lduw_nucleus(addr
);
2074 ret
= ldl_nucleus(addr
);
2078 ret
= ldq_nucleus(addr
);
2083 case 0x4a: /* UPA config */
2086 case 0x45: /* LSU */
2089 case 0x50: /* I-MMU regs */
2091 int reg
= (addr
>> 3) & 0xf;
2094 /* I-TSB Tag Target register */
2095 ret
= ultrasparc_tag_target(env
->immu
.tag_access
);
2097 ret
= env
->immuregs
[reg
];
2102 case 0x51: /* I-MMU 8k TSB pointer */
2104 /* env->immuregs[5] holds I-MMU TSB register value
2105 env->immuregs[6] holds I-MMU Tag Access register value */
2106 ret
= ultrasparc_tsb_pointer(env
->immu
.tsb
, env
->immu
.tag_access
,
2110 case 0x52: /* I-MMU 64k TSB pointer */
2112 /* env->immuregs[5] holds I-MMU TSB register value
2113 env->immuregs[6] holds I-MMU Tag Access register value */
2114 ret
= ultrasparc_tsb_pointer(env
->immu
.tsb
, env
->immu
.tag_access
,
2118 case 0x55: /* I-MMU data access */
2120 int reg
= (addr
>> 3) & 0x3f;
2122 ret
= env
->itlb
[reg
].tte
;
2125 case 0x56: /* I-MMU tag read */
2127 int reg
= (addr
>> 3) & 0x3f;
2129 ret
= env
->itlb
[reg
].tag
;
2132 case 0x58: /* D-MMU regs */
2134 int reg
= (addr
>> 3) & 0xf;
2137 /* D-TSB Tag Target register */
2138 ret
= ultrasparc_tag_target(env
->dmmu
.tag_access
);
2140 ret
= env
->dmmuregs
[reg
];
2144 case 0x59: /* D-MMU 8k TSB pointer */
2146 /* env->dmmuregs[5] holds D-MMU TSB register value
2147 env->dmmuregs[6] holds D-MMU Tag Access register value */
2148 ret
= ultrasparc_tsb_pointer(env
->dmmu
.tsb
, env
->dmmu
.tag_access
,
2152 case 0x5a: /* D-MMU 64k TSB pointer */
2154 /* env->dmmuregs[5] holds D-MMU TSB register value
2155 env->dmmuregs[6] holds D-MMU Tag Access register value */
2156 ret
= ultrasparc_tsb_pointer(env
->dmmu
.tsb
, env
->dmmu
.tag_access
,
2160 case 0x5d: /* D-MMU data access */
2162 int reg
= (addr
>> 3) & 0x3f;
2164 ret
= env
->dtlb
[reg
].tte
;
2167 case 0x5e: /* D-MMU tag read */
2169 int reg
= (addr
>> 3) & 0x3f;
2171 ret
= env
->dtlb
[reg
].tag
;
2174 case 0x46: /* D-cache data */
2175 case 0x47: /* D-cache tag access */
2176 case 0x4b: /* E-cache error enable */
2177 case 0x4c: /* E-cache asynchronous fault status */
2178 case 0x4d: /* E-cache asynchronous fault address */
2179 case 0x4e: /* E-cache tag data */
2180 case 0x66: /* I-cache instruction access */
2181 case 0x67: /* I-cache tag access */
2182 case 0x6e: /* I-cache predecode */
2183 case 0x6f: /* I-cache LRU etc. */
2184 case 0x76: /* E-cache tag */
2185 case 0x7e: /* E-cache tag */
2187 case 0x5b: /* D-MMU data pointer */
2188 case 0x48: /* Interrupt dispatch, RO */
2189 case 0x49: /* Interrupt data receive */
2190 case 0x7f: /* Incoming interrupt vector, RO */
2193 case 0x54: /* I-MMU data in, WO */
2194 case 0x57: /* I-MMU demap, WO */
2195 case 0x5c: /* D-MMU data in, WO */
2196 case 0x5f: /* D-MMU demap, WO */
2197 case 0x77: /* Interrupt vector, WO */
2199 do_unassigned_access(addr
, 0, 0, 1, size
);
2204 /* Convert from little endian */
2206 case 0x0c: /* Nucleus Little Endian (LE) */
2207 case 0x18: /* As if user primary LE */
2208 case 0x19: /* As if user secondary LE */
2209 case 0x1c: /* Bypass LE */
2210 case 0x1d: /* Bypass, non-cacheable LE */
2211 case 0x88: /* Primary LE */
2212 case 0x89: /* Secondary LE */
2230 /* Convert to signed number */
2237 ret
= (int16_t) ret
;
2240 ret
= (int32_t) ret
;
2247 dump_asi("read ", last_addr
, asi
, size
, ret
);
2252 void helper_st_asi(target_ulong addr
, target_ulong val
, int asi
, int size
)
2255 dump_asi("write", addr
, asi
, size
, val
);
2260 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
2261 || (cpu_has_hypervisor(env
)
2262 && asi
>= 0x30 && asi
< 0x80
2263 && !(env
->hpstate
& HS_PRIV
))) {
2264 helper_raise_exception(env
, TT_PRIV_ACT
);
2267 helper_check_align(addr
, size
- 1);
2268 addr
= asi_address_mask(env
, asi
, addr
);
2270 /* Convert to little endian */
2272 case 0x0c: /* Nucleus Little Endian (LE) */
2273 case 0x18: /* As if user primary LE */
2274 case 0x19: /* As if user secondary LE */
2275 case 0x1c: /* Bypass LE */
2276 case 0x1d: /* Bypass, non-cacheable LE */
2277 case 0x88: /* Primary LE */
2278 case 0x89: /* Secondary LE */
2297 case 0x10: /* As if user primary */
2298 case 0x11: /* As if user secondary */
2299 case 0x18: /* As if user primary LE */
2300 case 0x19: /* As if user secondary LE */
2301 case 0x80: /* Primary */
2302 case 0x81: /* Secondary */
2303 case 0x88: /* Primary LE */
2304 case 0x89: /* Secondary LE */
2305 case 0xe2: /* UA2007 Primary block init */
2306 case 0xe3: /* UA2007 Secondary block init */
2307 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
2308 if (cpu_hypervisor_mode(env
)) {
2311 stb_hypv(addr
, val
);
2314 stw_hypv(addr
, val
);
2317 stl_hypv(addr
, val
);
2321 stq_hypv(addr
, val
);
2325 /* secondary space access has lowest asi bit equal to 1 */
2329 stb_kernel_secondary(addr
, val
);
2332 stw_kernel_secondary(addr
, val
);
2335 stl_kernel_secondary(addr
, val
);
2339 stq_kernel_secondary(addr
, val
);
2345 stb_kernel(addr
, val
);
2348 stw_kernel(addr
, val
);
2351 stl_kernel(addr
, val
);
2355 stq_kernel(addr
, val
);
2361 /* secondary space access has lowest asi bit equal to 1 */
2365 stb_user_secondary(addr
, val
);
2368 stw_user_secondary(addr
, val
);
2371 stl_user_secondary(addr
, val
);
2375 stq_user_secondary(addr
, val
);
2381 stb_user(addr
, val
);
2384 stw_user(addr
, val
);
2387 stl_user(addr
, val
);
2391 stq_user(addr
, val
);
2397 case 0x14: /* Bypass */
2398 case 0x15: /* Bypass, non-cacheable */
2399 case 0x1c: /* Bypass LE */
2400 case 0x1d: /* Bypass, non-cacheable LE */
2404 stb_phys(addr
, val
);
2407 stw_phys(addr
, val
);
2410 stl_phys(addr
, val
);
2414 stq_phys(addr
, val
);
2419 case 0x24: /* Nucleus quad LDD 128 bit atomic */
2420 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
2421 Only ldda allowed */
2422 helper_raise_exception(env
, TT_ILL_INSN
);
2424 case 0x04: /* Nucleus */
2425 case 0x0c: /* Nucleus Little Endian (LE) */
2429 stb_nucleus(addr
, val
);
2432 stw_nucleus(addr
, val
);
2435 stl_nucleus(addr
, val
);
2439 stq_nucleus(addr
, val
);
2445 case 0x4a: /* UPA config */
2448 case 0x45: /* LSU */
2453 env
->lsu
= val
& (DMMU_E
| IMMU_E
);
2454 /* Mappings generated during D/I MMU disabled mode are
2455 invalid in normal mode */
2456 if (oldreg
!= env
->lsu
) {
2457 DPRINTF_MMU("LSU change: 0x%" PRIx64
" -> 0x%" PRIx64
"\n",
2460 dump_mmu(stdout
, fprintf
, env1
);
2466 case 0x50: /* I-MMU regs */
2468 int reg
= (addr
>> 3) & 0xf;
2471 oldreg
= env
->immuregs
[reg
];
2475 case 1: /* Not in I-MMU */
2479 if ((val
& 1) == 0) {
2480 val
= 0; /* Clear SFSR */
2482 env
->immu
.sfsr
= val
;
2486 case 5: /* TSB access */
2487 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64
" -> 0x%016"
2488 PRIx64
"\n", env
->immu
.tsb
, val
);
2489 env
->immu
.tsb
= val
;
2491 case 6: /* Tag access */
2492 env
->immu
.tag_access
= val
;
2501 if (oldreg
!= env
->immuregs
[reg
]) {
2502 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64
" -> 0x%016"
2503 PRIx64
"\n", reg
, oldreg
, env
->immuregs
[reg
]);
2506 dump_mmu(stdout
, fprintf
, env
);
2510 case 0x54: /* I-MMU data in */
2511 replace_tlb_1bit_lru(env
->itlb
, env
->immu
.tag_access
, val
, "immu", env
);
2513 case 0x55: /* I-MMU data access */
2515 /* TODO: auto demap */
2517 unsigned int i
= (addr
>> 3) & 0x3f;
2519 replace_tlb_entry(&env
->itlb
[i
], env
->immu
.tag_access
, val
, env
);
2522 DPRINTF_MMU("immu data access replaced entry [%i]\n", i
);
2523 dump_mmu(stdout
, fprintf
, env
);
2527 case 0x57: /* I-MMU demap */
2528 demap_tlb(env
->itlb
, addr
, "immu", env
);
2530 case 0x58: /* D-MMU regs */
2532 int reg
= (addr
>> 3) & 0xf;
2535 oldreg
= env
->dmmuregs
[reg
];
2541 if ((val
& 1) == 0) {
2542 val
= 0; /* Clear SFSR, Fault address */
2545 env
->dmmu
.sfsr
= val
;
2547 case 1: /* Primary context */
2548 env
->dmmu
.mmu_primary_context
= val
;
2549 /* can be optimized to only flush MMU_USER_IDX
2550 and MMU_KERNEL_IDX entries */
2553 case 2: /* Secondary context */
2554 env
->dmmu
.mmu_secondary_context
= val
;
2555 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
2556 and MMU_KERNEL_SECONDARY_IDX entries */
2559 case 5: /* TSB access */
2560 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64
" -> 0x%016"
2561 PRIx64
"\n", env
->dmmu
.tsb
, val
);
2562 env
->dmmu
.tsb
= val
;
2564 case 6: /* Tag access */
2565 env
->dmmu
.tag_access
= val
;
2567 case 7: /* Virtual Watchpoint */
2568 case 8: /* Physical Watchpoint */
2570 env
->dmmuregs
[reg
] = val
;
2574 if (oldreg
!= env
->dmmuregs
[reg
]) {
2575 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64
" -> 0x%016"
2576 PRIx64
"\n", reg
, oldreg
, env
->dmmuregs
[reg
]);
2579 dump_mmu(stdout
, fprintf
, env
);
2583 case 0x5c: /* D-MMU data in */
2584 replace_tlb_1bit_lru(env
->dtlb
, env
->dmmu
.tag_access
, val
, "dmmu", env
);
2586 case 0x5d: /* D-MMU data access */
2588 unsigned int i
= (addr
>> 3) & 0x3f;
2590 replace_tlb_entry(&env
->dtlb
[i
], env
->dmmu
.tag_access
, val
, env
);
2593 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i
);
2594 dump_mmu(stdout
, fprintf
, env
);
2598 case 0x5f: /* D-MMU demap */
2599 demap_tlb(env
->dtlb
, addr
, "dmmu", env
);
2601 case 0x49: /* Interrupt data receive */
2604 case 0x46: /* D-cache data */
2605 case 0x47: /* D-cache tag access */
2606 case 0x4b: /* E-cache error enable */
2607 case 0x4c: /* E-cache asynchronous fault status */
2608 case 0x4d: /* E-cache asynchronous fault address */
2609 case 0x4e: /* E-cache tag data */
2610 case 0x66: /* I-cache instruction access */
2611 case 0x67: /* I-cache tag access */
2612 case 0x6e: /* I-cache predecode */
2613 case 0x6f: /* I-cache LRU etc. */
2614 case 0x76: /* E-cache tag */
2615 case 0x7e: /* E-cache tag */
2617 case 0x51: /* I-MMU 8k TSB pointer, RO */
2618 case 0x52: /* I-MMU 64k TSB pointer, RO */
2619 case 0x56: /* I-MMU tag read, RO */
2620 case 0x59: /* D-MMU 8k TSB pointer, RO */
2621 case 0x5a: /* D-MMU 64k TSB pointer, RO */
2622 case 0x5b: /* D-MMU data pointer, RO */
2623 case 0x5e: /* D-MMU tag read, RO */
2624 case 0x48: /* Interrupt dispatch, RO */
2625 case 0x7f: /* Incoming interrupt vector, RO */
2626 case 0x82: /* Primary no-fault, RO */
2627 case 0x83: /* Secondary no-fault, RO */
2628 case 0x8a: /* Primary no-fault LE, RO */
2629 case 0x8b: /* Secondary no-fault LE, RO */
2631 do_unassigned_access(addr
, 1, 0, 1, size
);
2635 #endif /* CONFIG_USER_ONLY */
2637 void helper_ldda_asi(target_ulong addr
, int asi
, int rd
)
2639 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
2640 || (cpu_has_hypervisor(env
)
2641 && asi
>= 0x30 && asi
< 0x80
2642 && !(env
->hpstate
& HS_PRIV
))) {
2643 helper_raise_exception(env
, TT_PRIV_ACT
);
2646 addr
= asi_address_mask(env
, asi
, addr
);
2649 #if !defined(CONFIG_USER_ONLY)
2650 case 0x24: /* Nucleus quad LDD 128 bit atomic */
2651 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */
2652 helper_check_align(addr
, 0xf);
2654 env
->gregs
[1] = ldq_nucleus(addr
+ 8);
2656 bswap64s(&env
->gregs
[1]);
2658 } else if (rd
< 8) {
2659 env
->gregs
[rd
] = ldq_nucleus(addr
);
2660 env
->gregs
[rd
+ 1] = ldq_nucleus(addr
+ 8);
2662 bswap64s(&env
->gregs
[rd
]);
2663 bswap64s(&env
->gregs
[rd
+ 1]);
2666 env
->regwptr
[rd
] = ldq_nucleus(addr
);
2667 env
->regwptr
[rd
+ 1] = ldq_nucleus(addr
+ 8);
2669 bswap64s(&env
->regwptr
[rd
]);
2670 bswap64s(&env
->regwptr
[rd
+ 1]);
2676 helper_check_align(addr
, 0x3);
2678 env
->gregs
[1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2679 } else if (rd
< 8) {
2680 env
->gregs
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2681 env
->gregs
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2683 env
->regwptr
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2684 env
->regwptr
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2690 void helper_ldf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2695 helper_check_align(addr
, 3);
2696 addr
= asi_address_mask(env
, asi
, addr
);
2699 case 0xf0: /* UA2007/JPS1 Block load primary */
2700 case 0xf1: /* UA2007/JPS1 Block load secondary */
2701 case 0xf8: /* UA2007/JPS1 Block load primary LE */
2702 case 0xf9: /* UA2007/JPS1 Block load secondary LE */
2704 helper_raise_exception(env
, TT_ILL_INSN
);
2707 helper_check_align(addr
, 0x3f);
2708 for (i
= 0; i
< 16; i
++) {
2709 *(uint32_t *)&env
->fpr
[rd
++] = helper_ld_asi(addr
, asi
& 0x8f, 4,
2715 case 0x16: /* UA2007 Block load primary, user privilege */
2716 case 0x17: /* UA2007 Block load secondary, user privilege */
2717 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2718 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2719 case 0x70: /* JPS1 Block load primary, user privilege */
2720 case 0x71: /* JPS1 Block load secondary, user privilege */
2721 case 0x78: /* JPS1 Block load primary LE, user privilege */
2722 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2724 helper_raise_exception(env
, TT_ILL_INSN
);
2727 helper_check_align(addr
, 0x3f);
2728 for (i
= 0; i
< 16; i
++) {
2729 *(uint32_t *)&env
->fpr
[rd
++] = helper_ld_asi(addr
, asi
& 0x19, 4,
2742 *((uint32_t *)&env
->fpr
[rd
]) = helper_ld_asi(addr
, asi
, size
, 0);
2745 u
.ll
= helper_ld_asi(addr
, asi
, size
, 0);
2746 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2747 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2750 u
.ll
= helper_ld_asi(addr
, asi
, 8, 0);
2751 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2752 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2753 u
.ll
= helper_ld_asi(addr
+ 8, asi
, 8, 0);
2754 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2755 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2760 void helper_stf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2763 target_ulong val
= 0;
2766 helper_check_align(addr
, 3);
2767 addr
= asi_address_mask(env
, asi
, addr
);
2770 case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
2771 case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
2772 case 0xf0: /* UA2007/JPS1 Block store primary */
2773 case 0xf1: /* UA2007/JPS1 Block store secondary */
2774 case 0xf8: /* UA2007/JPS1 Block store primary LE */
2775 case 0xf9: /* UA2007/JPS1 Block store secondary LE */
2777 helper_raise_exception(env
, TT_ILL_INSN
);
2780 helper_check_align(addr
, 0x3f);
2781 for (i
= 0; i
< 16; i
++) {
2782 val
= *(uint32_t *)&env
->fpr
[rd
++];
2783 helper_st_asi(addr
, val
, asi
& 0x8f, 4);
2788 case 0x16: /* UA2007 Block load primary, user privilege */
2789 case 0x17: /* UA2007 Block load secondary, user privilege */
2790 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2791 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2792 case 0x70: /* JPS1 Block store primary, user privilege */
2793 case 0x71: /* JPS1 Block store secondary, user privilege */
2794 case 0x78: /* JPS1 Block load primary LE, user privilege */
2795 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2797 helper_raise_exception(env
, TT_ILL_INSN
);
2800 helper_check_align(addr
, 0x3f);
2801 for (i
= 0; i
< 16; i
++) {
2802 val
= *(uint32_t *)&env
->fpr
[rd
++];
2803 helper_st_asi(addr
, val
, asi
& 0x19, 4);
2815 helper_st_asi(addr
, *(uint32_t *)&env
->fpr
[rd
], asi
, size
);
2818 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2819 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2820 helper_st_asi(addr
, u
.ll
, asi
, size
);
2823 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2824 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2825 helper_st_asi(addr
, u
.ll
, asi
, 8);
2826 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2827 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2828 helper_st_asi(addr
+ 8, u
.ll
, asi
, 8);
2833 target_ulong
helper_cas_asi(target_ulong addr
, target_ulong val1
,
2834 target_ulong val2
, uint32_t asi
)
2838 val2
&= 0xffffffffUL
;
2839 ret
= helper_ld_asi(addr
, asi
, 4, 0);
2840 ret
&= 0xffffffffUL
;
2842 helper_st_asi(addr
, val1
& 0xffffffffUL
, asi
, 4);
2847 target_ulong
helper_casx_asi(target_ulong addr
, target_ulong val1
,
2848 target_ulong val2
, uint32_t asi
)
2852 ret
= helper_ld_asi(addr
, asi
, 8, 0);
2854 helper_st_asi(addr
, val1
, asi
, 8);
2858 #endif /* TARGET_SPARC64 */
2860 #ifndef TARGET_SPARC64
2861 void helper_rett(void)
2865 if (env
->psret
== 1) {
2866 helper_raise_exception(env
, TT_ILL_INSN
);
2870 cwp
= cwp_inc(env
->cwp
+ 1) ;
2871 if (env
->wim
& (1 << cwp
)) {
2872 helper_raise_exception(env
, TT_WIN_UNF
);
2875 env
->psrs
= env
->psrps
;
2879 static target_ulong
helper_udiv_common(target_ulong a
, target_ulong b
, int cc
)
2885 x0
= (a
& 0xffffffff) | ((int64_t) (env
->y
) << 32);
2886 x1
= (b
& 0xffffffff);
2889 helper_raise_exception(env
, TT_DIV_ZERO
);
2893 if (x0
> 0xffffffff) {
2900 env
->cc_src2
= overflow
;
2901 env
->cc_op
= CC_OP_DIV
;
2906 target_ulong
helper_udiv(target_ulong a
, target_ulong b
)
2908 return helper_udiv_common(a
, b
, 0);
2911 target_ulong
helper_udiv_cc(target_ulong a
, target_ulong b
)
2913 return helper_udiv_common(a
, b
, 1);
2916 static target_ulong
helper_sdiv_common(target_ulong a
, target_ulong b
, int cc
)
2922 x0
= (a
& 0xffffffff) | ((int64_t) (env
->y
) << 32);
2923 x1
= (b
& 0xffffffff);
2926 helper_raise_exception(env
, TT_DIV_ZERO
);
2930 if ((int32_t) x0
!= x0
) {
2931 x0
= x0
< 0 ? 0x80000000 : 0x7fffffff;
2937 env
->cc_src2
= overflow
;
2938 env
->cc_op
= CC_OP_DIV
;
2943 target_ulong
helper_sdiv(target_ulong a
, target_ulong b
)
2945 return helper_sdiv_common(a
, b
, 0);
2948 target_ulong
helper_sdiv_cc(target_ulong a
, target_ulong b
)
2950 return helper_sdiv_common(a
, b
, 1);
2953 void helper_stdf(target_ulong addr
, int mem_idx
)
2955 helper_check_align(addr
, 7);
2956 #if !defined(CONFIG_USER_ONLY)
2959 stfq_user(addr
, DT0
);
2961 case MMU_KERNEL_IDX
:
2962 stfq_kernel(addr
, DT0
);
2964 #ifdef TARGET_SPARC64
2966 stfq_hypv(addr
, DT0
);
2970 DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx
);
2974 stfq_raw(address_mask(env
, addr
), DT0
);
2978 void helper_lddf(target_ulong addr
, int mem_idx
)
2980 helper_check_align(addr
, 7);
2981 #if !defined(CONFIG_USER_ONLY)
2984 DT0
= ldfq_user(addr
);
2986 case MMU_KERNEL_IDX
:
2987 DT0
= ldfq_kernel(addr
);
2989 #ifdef TARGET_SPARC64
2991 DT0
= ldfq_hypv(addr
);
2995 DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx
);
2999 DT0
= ldfq_raw(address_mask(env
, addr
));
3003 void helper_ldqf(target_ulong addr
, int mem_idx
)
3005 /* XXX add 128 bit load */
3008 helper_check_align(addr
, 7);
3009 #if !defined(CONFIG_USER_ONLY)
3012 u
.ll
.upper
= ldq_user(addr
);
3013 u
.ll
.lower
= ldq_user(addr
+ 8);
3016 case MMU_KERNEL_IDX
:
3017 u
.ll
.upper
= ldq_kernel(addr
);
3018 u
.ll
.lower
= ldq_kernel(addr
+ 8);
3021 #ifdef TARGET_SPARC64
3023 u
.ll
.upper
= ldq_hypv(addr
);
3024 u
.ll
.lower
= ldq_hypv(addr
+ 8);
3029 DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx
);
3033 u
.ll
.upper
= ldq_raw(address_mask(env
, addr
));
3034 u
.ll
.lower
= ldq_raw(address_mask(env
, addr
+ 8));
3039 void helper_stqf(target_ulong addr
, int mem_idx
)
3041 /* XXX add 128 bit store */
3044 helper_check_align(addr
, 7);
3045 #if !defined(CONFIG_USER_ONLY)
3049 stq_user(addr
, u
.ll
.upper
);
3050 stq_user(addr
+ 8, u
.ll
.lower
);
3052 case MMU_KERNEL_IDX
:
3054 stq_kernel(addr
, u
.ll
.upper
);
3055 stq_kernel(addr
+ 8, u
.ll
.lower
);
3057 #ifdef TARGET_SPARC64
3060 stq_hypv(addr
, u
.ll
.upper
);
3061 stq_hypv(addr
+ 8, u
.ll
.lower
);
3065 DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx
);
3070 stq_raw(address_mask(env
, addr
), u
.ll
.upper
);
3071 stq_raw(address_mask(env
, addr
+ 8), u
.ll
.lower
);
3075 #ifndef TARGET_SPARC64
3076 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3078 void helper_save(void)
3082 cwp
= cwp_dec(env
->cwp
- 1);
3083 if (env
->wim
& (1 << cwp
)) {
3084 helper_raise_exception(env
, TT_WIN_OVF
);
3089 void helper_restore(void)
3093 cwp
= cwp_inc(env
->cwp
+ 1);
3094 if (env
->wim
& (1 << cwp
)) {
3095 helper_raise_exception(env
, TT_WIN_UNF
);
3100 void helper_wrpsr(target_ulong new_psr
)
3102 if ((new_psr
& PSR_CWP
) >= env
->nwindows
) {
3103 helper_raise_exception(env
, TT_ILL_INSN
);
3105 cpu_put_psr(env
, new_psr
);
3109 target_ulong
helper_rdpsr(void)
3115 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3117 void helper_save(void)
3121 cwp
= cwp_dec(env
->cwp
- 1);
3122 if (env
->cansave
== 0) {
3123 helper_raise_exception(env
, TT_SPILL
| (env
->otherwin
!= 0 ?
3125 ((env
->wstate
& 0x38) >> 1)) :
3126 ((env
->wstate
& 0x7) << 2)));
3128 if (env
->cleanwin
- env
->canrestore
== 0) {
3129 /* XXX Clean windows without trap */
3130 helper_raise_exception(env
, TT_CLRWIN
);
3139 void helper_restore(void)
3143 cwp
= cwp_inc(env
->cwp
+ 1);
3144 if (env
->canrestore
== 0) {
3145 helper_raise_exception(env
, TT_FILL
| (env
->otherwin
!= 0 ?
3147 ((env
->wstate
& 0x38) >> 1)) :
3148 ((env
->wstate
& 0x7) << 2)));
3156 void helper_flushw(void)
3158 if (env
->cansave
!= env
->nwindows
- 2) {
3159 helper_raise_exception(env
, TT_SPILL
| (env
->otherwin
!= 0 ?
3161 ((env
->wstate
& 0x38) >> 1)) :
3162 ((env
->wstate
& 0x7) << 2)));
3166 void helper_saved(void)
3169 if (env
->otherwin
== 0) {
3176 void helper_restored(void)
3179 if (env
->cleanwin
< env
->nwindows
- 1) {
3182 if (env
->otherwin
== 0) {
3189 static target_ulong
get_ccr(void)
3195 return ((env
->xcc
>> 20) << 4) | ((psr
& PSR_ICC
) >> 20);
3198 target_ulong
cpu_get_ccr(CPUState
*env1
)
3200 CPUState
*saved_env
;
3210 static void put_ccr(target_ulong val
)
3212 env
->xcc
= (val
>> 4) << 20;
3213 env
->psr
= (val
& 0xf) << 20;
3214 CC_OP
= CC_OP_FLAGS
;
3217 void cpu_put_ccr(CPUState
*env1
, target_ulong val
)
3219 CPUState
*saved_env
;
3227 static target_ulong
get_cwp64(void)
3229 return env
->nwindows
- 1 - env
->cwp
;
3232 target_ulong
cpu_get_cwp64(CPUState
*env1
)
3234 CPUState
*saved_env
;
3244 static void put_cwp64(int cwp
)
3246 if (unlikely(cwp
>= env
->nwindows
|| cwp
< 0)) {
3247 cwp
%= env
->nwindows
;
3249 set_cwp(env
->nwindows
- 1 - cwp
);
3252 void cpu_put_cwp64(CPUState
*env1
, int cwp
)
3254 CPUState
*saved_env
;
3262 target_ulong
helper_rdccr(void)
3267 void helper_wrccr(target_ulong new_ccr
)
3272 /* CWP handling is reversed in V9, but we still use the V8 register
3274 target_ulong
helper_rdcwp(void)
3279 void helper_wrcwp(target_ulong new_cwp
)
3284 static inline uint64_t *get_gregset(uint32_t pstate
)
3288 DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
3290 (pstate
& PS_IG
) ? " IG" : "",
3291 (pstate
& PS_MG
) ? " MG" : "",
3292 (pstate
& PS_AG
) ? " AG" : "");
3293 /* pass through to normal set of global registers */
3305 static inline void change_pstate(uint32_t new_pstate
)
3307 uint32_t pstate_regs
, new_pstate_regs
;
3308 uint64_t *src
, *dst
;
3310 if (env
->def
->features
& CPU_FEATURE_GL
) {
3311 /* PS_AG is not implemented in this case */
3312 new_pstate
&= ~PS_AG
;
3315 pstate_regs
= env
->pstate
& 0xc01;
3316 new_pstate_regs
= new_pstate
& 0xc01;
3318 if (new_pstate_regs
!= pstate_regs
) {
3319 DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
3320 pstate_regs
, new_pstate_regs
);
3321 /* Switch global register bank */
3322 src
= get_gregset(new_pstate_regs
);
3323 dst
= get_gregset(pstate_regs
);
3324 memcpy32(dst
, env
->gregs
);
3325 memcpy32(env
->gregs
, src
);
3327 DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
3330 env
->pstate
= new_pstate
;
3333 void helper_wrpstate(target_ulong new_state
)
3335 change_pstate(new_state
& 0xf3f);
3337 #if !defined(CONFIG_USER_ONLY)
3338 if (cpu_interrupts_enabled(env
)) {
3339 cpu_check_irqs(env
);
3344 void cpu_change_pstate(CPUState
*env1
, uint32_t new_pstate
)
3346 CPUState
*saved_env
;
3350 change_pstate(new_pstate
);
3354 void helper_wrpil(target_ulong new_pil
)
3356 #if !defined(CONFIG_USER_ONLY)
3357 DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
3358 env
->psrpil
, (uint32_t)new_pil
);
3360 env
->psrpil
= new_pil
;
3362 if (cpu_interrupts_enabled(env
)) {
3363 cpu_check_irqs(env
);
3368 void helper_done(void)
3370 trap_state
*tsptr
= cpu_tsptr(env
);
3372 env
->pc
= tsptr
->tnpc
;
3373 env
->npc
= tsptr
->tnpc
+ 4;
3374 put_ccr(tsptr
->tstate
>> 32);
3375 env
->asi
= (tsptr
->tstate
>> 24) & 0xff;
3376 change_pstate((tsptr
->tstate
>> 8) & 0xf3f);
3377 put_cwp64(tsptr
->tstate
& 0xff);
3380 DPRINTF_PSTATE("... helper_done tl=%d\n", env
->tl
);
3382 #if !defined(CONFIG_USER_ONLY)
3383 if (cpu_interrupts_enabled(env
)) {
3384 cpu_check_irqs(env
);
3389 void helper_retry(void)
3391 trap_state
*tsptr
= cpu_tsptr(env
);
3393 env
->pc
= tsptr
->tpc
;
3394 env
->npc
= tsptr
->tnpc
;
3395 put_ccr(tsptr
->tstate
>> 32);
3396 env
->asi
= (tsptr
->tstate
>> 24) & 0xff;
3397 change_pstate((tsptr
->tstate
>> 8) & 0xf3f);
3398 put_cwp64(tsptr
->tstate
& 0xff);
3401 DPRINTF_PSTATE("... helper_retry tl=%d\n", env
->tl
);
3403 #if !defined(CONFIG_USER_ONLY)
3404 if (cpu_interrupts_enabled(env
)) {
3405 cpu_check_irqs(env
);
3410 static void do_modify_softint(const char *operation
, uint32_t value
)
3412 if (env
->softint
!= value
) {
3413 env
->softint
= value
;
3414 DPRINTF_PSTATE(": %s new %08x\n", operation
, env
->softint
);
3415 #if !defined(CONFIG_USER_ONLY)
3416 if (cpu_interrupts_enabled(env
)) {
3417 cpu_check_irqs(env
);
3423 void helper_set_softint(uint64_t value
)
3425 do_modify_softint("helper_set_softint", env
->softint
| (uint32_t)value
);
3428 void helper_clear_softint(uint64_t value
)
3430 do_modify_softint("helper_clear_softint", env
->softint
& (uint32_t)~value
);
3433 void helper_write_softint(uint64_t value
)
3435 do_modify_softint("helper_write_softint", (uint32_t)value
);
3439 #if !defined(CONFIG_USER_ONLY)
3441 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
3444 #define MMUSUFFIX _mmu
3445 #define ALIGNED_ONLY
3448 #include "softmmu_template.h"
3451 #include "softmmu_template.h"
3454 #include "softmmu_template.h"
3457 #include "softmmu_template.h"
3459 /* XXX: make it generic ? */
3460 static void cpu_restore_state2(void *retaddr
)
3462 TranslationBlock
*tb
;
3466 /* now we have a real cpu fault */
3467 pc
= (unsigned long)retaddr
;
3468 tb
= tb_find_pc(pc
);
3470 /* the PC is inside the translated code. It means that we have
3471 a virtual CPU fault */
3472 cpu_restore_state(tb
, env
, pc
);
3477 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
3480 #ifdef DEBUG_UNALIGNED
3481 printf("Unaligned access to 0x" TARGET_FMT_lx
" from 0x" TARGET_FMT_lx
3482 "\n", addr
, env
->pc
);
3484 cpu_restore_state2(retaddr
);
3485 helper_raise_exception(env
, TT_UNALIGNED
);
3488 /* try to fill the TLB and return an exception if error. If retaddr is
3489 NULL, it means that the function was called in C code (i.e. not
3490 from generated code or from helper.c) */
3491 /* XXX: fix it to restore all registers */
3492 void tlb_fill(CPUState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
3496 CPUState
*saved_env
;
3501 ret
= cpu_sparc_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
3503 cpu_restore_state2(retaddr
);
3509 #endif /* !CONFIG_USER_ONLY */
3511 #ifndef TARGET_SPARC64
3512 #if !defined(CONFIG_USER_ONLY)
3513 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
3514 int is_exec
, int is_asi
, int size
)
3518 #ifdef DEBUG_UNASSIGNED
3520 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
3521 " asi 0x%02x from " TARGET_FMT_lx
"\n",
3522 is_exec
? "exec" : is_write
? "write" : "read", size
,
3523 size
== 1 ? "" : "s", addr
, is_asi
, env
->pc
);
3525 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
3526 " from " TARGET_FMT_lx
"\n",
3527 is_exec
? "exec" : is_write
? "write" : "read", size
,
3528 size
== 1 ? "" : "s", addr
, env
->pc
);
3531 /* Don't overwrite translation and access faults */
3532 fault_type
= (env
->mmuregs
[3] & 0x1c) >> 2;
3533 if ((fault_type
> 4) || (fault_type
== 0)) {
3534 env
->mmuregs
[3] = 0; /* Fault status register */
3536 env
->mmuregs
[3] |= 1 << 16;
3539 env
->mmuregs
[3] |= 1 << 5;
3542 env
->mmuregs
[3] |= 1 << 6;
3545 env
->mmuregs
[3] |= 1 << 7;
3547 env
->mmuregs
[3] |= (5 << 2) | 2;
3548 /* SuperSPARC will never place instruction fault addresses in the FAR */
3550 env
->mmuregs
[4] = addr
; /* Fault address register */
3553 /* overflow (same type fault was not read before another fault) */
3554 if (fault_type
== ((env
->mmuregs
[3] & 0x1c)) >> 2) {
3555 env
->mmuregs
[3] |= 1;
3558 if ((env
->mmuregs
[0] & MMU_E
) && !(env
->mmuregs
[0] & MMU_NF
)) {
3560 helper_raise_exception(env
, TT_CODE_ACCESS
);
3562 helper_raise_exception(env
, TT_DATA_ACCESS
);
3566 /* flush neverland mappings created during no-fault mode,
3567 so the sequential MMU faults report proper fault types */
3568 if (env
->mmuregs
[0] & MMU_NF
) {
3574 #if defined(CONFIG_USER_ONLY)
3575 static void do_unassigned_access(target_ulong addr
, int is_write
, int is_exec
,
3576 int is_asi
, int size
)
3578 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
3579 int is_exec
, int is_asi
, int size
)
3582 #ifdef DEBUG_UNASSIGNED
3583 printf("Unassigned mem access to " TARGET_FMT_plx
" from " TARGET_FMT_lx
3584 "\n", addr
, env
->pc
);
3588 helper_raise_exception(env
, TT_CODE_ACCESS
);
3590 helper_raise_exception(env
, TT_DATA_ACCESS
);
3595 #if !defined(CONFIG_USER_ONLY)
3596 void cpu_unassigned_access(CPUState
*env1
, target_phys_addr_t addr
,
3597 int is_write
, int is_exec
, int is_asi
, int size
)
3599 CPUState
*saved_env
;
3603 /* Ignore unassigned accesses outside of CPU context */
3605 do_unassigned_access(addr
, is_write
, is_exec
, is_asi
, size
);