/* op_helper.c — SPARC helper routines (QEMU, target-sparc).
   NOTE(review): this file is a damaged extraction; header residue removed. */
2 #include "dyngen-exec.h"
5 #if !defined(CONFIG_USER_ONLY)
6 #include "softmmu_exec.h"
11 //#define DEBUG_UNALIGNED
12 //#define DEBUG_UNASSIGNED
14 //#define DEBUG_CACHE_CONTROL
17 #define DPRINTF_MMU(fmt, ...) \
18 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
20 #define DPRINTF_MMU(fmt, ...) do {} while (0)
24 #define DPRINTF_MXCC(fmt, ...) \
25 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
27 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
31 #define DPRINTF_ASI(fmt, ...) \
32 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
35 #ifdef DEBUG_CACHE_CONTROL
36 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
37 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
39 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
44 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
46 #define AM_CHECK(env1) (1)
50 #define DT0 (env->dt0)
51 #define DT1 (env->dt1)
52 #define QT0 (env->qt0)
53 #define QT1 (env->qt1)
55 #if !defined(CONFIG_USER_ONLY)
56 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
57 int is_exec
, int is_asi
, int size
);
60 static void do_unassigned_access(target_ulong addr
, int is_write
, int is_exec
,
61 int is_asi
, int size
);
65 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
66 /* Calculates TSB pointer value for fault page size 8k or 64k */
/* Calculates the TSB pointer value for a fault page size of 8k or 64k.
 *
 * tsb_register:        TSB register value; bits [63:13] hold the TSB base,
 *                      bit 12 the split flag, bits [3:0] the TSB size.
 * tag_access_register: Tag Access register; low 13 bits hold the tag access
 *                      context and are discarded, the rest is the fault VA.
 * page_size:           fault page size in bytes (8*1024 or 64*1024).
 *
 * Returns the 16-byte-aligned TSB entry pointer formed by merging the TSB
 * base with the reordered VA bits.
 */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    uint64_t tsb_base = tsb_register & ~0x1fffULL;
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size  = tsb_register & 0xf;

    /* discard lower 13 bits which hold tag access context */
    uint64_t tag_access_va = tag_access_register & ~0x1fffULL;

    /* now reorder bits */
    uint64_t tsb_base_mask = ~0x1fffULL;
    uint64_t va = tag_access_va;

    /* move va bits to correct position */
    if (page_size == 8*1024) {
        va >>= 9;
    } else if (page_size == 64*1024) {
        va >>= 12;
    }

    if (tsb_size) {
        tsb_base_mask <<= tsb_size;
    }

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (page_size == 8*1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64*1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
/* Calculates tag target register value by reordering bits
   in tag access register: the 13-bit context moves to bits [60:48] and
   the VA field (bits [63:22]) moves to the low bits. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
}
113 static void replace_tlb_entry(SparcTLBEntry
*tlb
,
114 uint64_t tlb_tag
, uint64_t tlb_tte
,
117 target_ulong mask
, size
, va
, offset
;
119 /* flush page range if translation is valid */
120 if (TTE_IS_VALID(tlb
->tte
)) {
122 mask
= 0xffffffffffffe000ULL
;
123 mask
<<= 3 * ((tlb
->tte
>> 61) & 3);
126 va
= tlb
->tag
& mask
;
128 for (offset
= 0; offset
< size
; offset
+= TARGET_PAGE_SIZE
) {
129 tlb_flush_page(env1
, va
+ offset
);
137 static void demap_tlb(SparcTLBEntry
*tlb
, target_ulong demap_addr
,
138 const char *strmmu
, CPUState
*env1
)
144 int is_demap_context
= (demap_addr
>> 6) & 1;
147 switch ((demap_addr
>> 4) & 3) {
148 case 0: /* primary */
149 context
= env1
->dmmu
.mmu_primary_context
;
151 case 1: /* secondary */
152 context
= env1
->dmmu
.mmu_secondary_context
;
154 case 2: /* nucleus */
157 case 3: /* reserved */
162 for (i
= 0; i
< 64; i
++) {
163 if (TTE_IS_VALID(tlb
[i
].tte
)) {
165 if (is_demap_context
) {
166 /* will remove non-global entries matching context value */
167 if (TTE_IS_GLOBAL(tlb
[i
].tte
) ||
168 !tlb_compare_context(&tlb
[i
], context
)) {
173 will remove any entry matching VA */
174 mask
= 0xffffffffffffe000ULL
;
175 mask
<<= 3 * ((tlb
[i
].tte
>> 61) & 3);
177 if (!compare_masked(demap_addr
, tlb
[i
].tag
, mask
)) {
181 /* entry should be global or matching context value */
182 if (!TTE_IS_GLOBAL(tlb
[i
].tte
) &&
183 !tlb_compare_context(&tlb
[i
], context
)) {
188 replace_tlb_entry(&tlb
[i
], 0, 0, env1
);
190 DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu
, i
);
191 dump_mmu(stdout
, fprintf
, env1
);
197 static void replace_tlb_1bit_lru(SparcTLBEntry
*tlb
,
198 uint64_t tlb_tag
, uint64_t tlb_tte
,
199 const char *strmmu
, CPUState
*env1
)
201 unsigned int i
, replace_used
;
203 /* Try replacing invalid entry */
204 for (i
= 0; i
< 64; i
++) {
205 if (!TTE_IS_VALID(tlb
[i
].tte
)) {
206 replace_tlb_entry(&tlb
[i
], tlb_tag
, tlb_tte
, env1
);
208 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu
, i
);
209 dump_mmu(stdout
, fprintf
, env1
);
215 /* All entries are valid, try replacing unlocked entry */
217 for (replace_used
= 0; replace_used
< 2; ++replace_used
) {
219 /* Used entries are not replaced on first pass */
221 for (i
= 0; i
< 64; i
++) {
222 if (!TTE_IS_LOCKED(tlb
[i
].tte
) && !TTE_IS_USED(tlb
[i
].tte
)) {
224 replace_tlb_entry(&tlb
[i
], tlb_tag
, tlb_tte
, env1
);
226 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
227 strmmu
, (replace_used
? "used" : "unused"), i
);
228 dump_mmu(stdout
, fprintf
, env1
);
234 /* Now reset used bit and search for unused entries again */
236 for (i
= 0; i
< 64; i
++) {
237 TTE_SET_UNUSED(tlb
[i
].tte
);
242 DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu
);
249 static inline target_ulong
address_mask(CPUState
*env1
, target_ulong addr
)
251 #ifdef TARGET_SPARC64
252 if (AM_CHECK(env1
)) {
253 addr
&= 0xffffffffULL
;
/* returns true if access using this ASI is to have address translated by MMU
   otherwise access is to raw physical address */
static inline int is_translating_asi(int asi)
{
#ifdef TARGET_SPARC64
    /* Ultrasparc IIi translating asi
       - note this list is defined by cpu implementation */
    switch (asi) {
    case 0x04 ... 0x11:
    case 0x16 ... 0x19:
    case 0x1E ... 0x1F:
    case 0x24 ... 0x2C:
    case 0x70 ... 0x73:
    case 0x78 ... 0x79:
    case 0x80 ... 0xFF:
        return 1;

    default:
        return 0;
    }
#else
    /* TODO: check sparc32 bits */
    return 0;
#endif
}
286 static inline target_ulong
asi_address_mask(CPUState
*env1
,
287 int asi
, target_ulong addr
)
289 if (is_translating_asi(asi
)) {
290 return address_mask(env
, addr
);
296 void helper_check_align(target_ulong addr
, uint32_t align
)
299 #ifdef DEBUG_UNALIGNED
300 printf("Unaligned access to 0x" TARGET_FMT_lx
" from 0x" TARGET_FMT_lx
301 "\n", addr
, env
->pc
);
303 helper_raise_exception(env
, TT_UNALIGNED
);
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
/* Debug helper: prints the SuperSPARC MXCC stream data and control
   registers to stdout. Compiled only with DEBUG_MXCC on sparc32 softmmu. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Debug helper: logs one ASI access (read or write), truncating the shown
   value to the access size. Compiled only with DEBUG_ASI. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
352 #ifndef TARGET_SPARC64
353 #ifndef CONFIG_USER_ONLY
356 /* Leon3 cache control */
358 static void leon3_cache_control_st(target_ulong addr
, uint64_t val
, int size
)
360 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64
", size:%d\n",
364 DPRINTF_CACHE_CONTROL("32bits only\n");
369 case 0x00: /* Cache control */
371 /* These values must always be read as zeros */
372 val
&= ~CACHE_CTRL_FD
;
373 val
&= ~CACHE_CTRL_FI
;
374 val
&= ~CACHE_CTRL_IB
;
375 val
&= ~CACHE_CTRL_IP
;
376 val
&= ~CACHE_CTRL_DP
;
378 env
->cache_control
= val
;
380 case 0x04: /* Instruction cache configuration */
381 case 0x08: /* Data cache configuration */
385 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr
);
390 static uint64_t leon3_cache_control_ld(target_ulong addr
, int size
)
395 DPRINTF_CACHE_CONTROL("32bits only\n");
400 case 0x00: /* Cache control */
401 ret
= env
->cache_control
;
404 /* Configuration registers are read and only always keep those
407 case 0x04: /* Instruction cache configuration */
410 case 0x08: /* Data cache configuration */
414 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr
);
417 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64
", size:%d\n",
422 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
425 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
426 uint32_t last_addr
= addr
;
429 helper_check_align(addr
, size
- 1);
431 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
433 case 0x00: /* Leon3 Cache Control */
434 case 0x08: /* Leon3 Instruction Cache config */
435 case 0x0C: /* Leon3 Date Cache config */
436 if (env
->def
->features
& CPU_FEATURE_CACHE_CTRL
) {
437 ret
= leon3_cache_control_ld(addr
, size
);
440 case 0x01c00a00: /* MXCC control register */
442 ret
= env
->mxccregs
[3];
444 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
448 case 0x01c00a04: /* MXCC control register */
450 ret
= env
->mxccregs
[3];
452 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
456 case 0x01c00c00: /* Module reset register */
458 ret
= env
->mxccregs
[5];
459 /* should we do something here? */
461 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
465 case 0x01c00f00: /* MBus port address register */
467 ret
= env
->mxccregs
[7];
469 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
474 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr
,
478 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
479 "addr = %08x -> ret = %" PRIx64
","
480 "addr = %08x\n", asi
, size
, sign
, last_addr
, ret
, addr
);
485 case 3: /* MMU probe */
489 mmulev
= (addr
>> 8) & 15;
493 ret
= mmu_probe(env
, addr
, mmulev
);
495 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64
"\n",
499 case 4: /* read MMU regs */
501 int reg
= (addr
>> 8) & 0x1f;
503 ret
= env
->mmuregs
[reg
];
504 if (reg
== 3) { /* Fault status cleared on read */
506 } else if (reg
== 0x13) { /* Fault status read */
507 ret
= env
->mmuregs
[3];
508 } else if (reg
== 0x14) { /* Fault address read */
509 ret
= env
->mmuregs
[4];
511 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64
"\n", reg
, ret
);
514 case 5: /* Turbosparc ITLB Diagnostic */
515 case 6: /* Turbosparc DTLB Diagnostic */
516 case 7: /* Turbosparc IOTLB Diagnostic */
518 case 9: /* Supervisor code access */
521 ret
= ldub_code(addr
);
524 ret
= lduw_code(addr
);
528 ret
= ldl_code(addr
);
531 ret
= ldq_code(addr
);
535 case 0xa: /* User data access */
538 ret
= ldub_user(addr
);
541 ret
= lduw_user(addr
);
545 ret
= ldl_user(addr
);
548 ret
= ldq_user(addr
);
552 case 0xb: /* Supervisor data access */
555 ret
= ldub_kernel(addr
);
558 ret
= lduw_kernel(addr
);
562 ret
= ldl_kernel(addr
);
565 ret
= ldq_kernel(addr
);
569 case 0xc: /* I-cache tag */
570 case 0xd: /* I-cache data */
571 case 0xe: /* D-cache tag */
572 case 0xf: /* D-cache data */
574 case 0x20: /* MMU passthrough */
577 ret
= ldub_phys(addr
);
580 ret
= lduw_phys(addr
);
584 ret
= ldl_phys(addr
);
587 ret
= ldq_phys(addr
);
591 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
594 ret
= ldub_phys((target_phys_addr_t
)addr
595 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
598 ret
= lduw_phys((target_phys_addr_t
)addr
599 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
603 ret
= ldl_phys((target_phys_addr_t
)addr
604 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
607 ret
= ldq_phys((target_phys_addr_t
)addr
608 | ((target_phys_addr_t
)(asi
& 0xf) << 32));
612 case 0x30: /* Turbosparc secondary cache diagnostic */
613 case 0x31: /* Turbosparc RAM snoop */
614 case 0x32: /* Turbosparc page table descriptor diagnostic */
615 case 0x39: /* data cache diagnostic register */
618 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
620 int reg
= (addr
>> 8) & 3;
623 case 0: /* Breakpoint Value (Addr) */
624 ret
= env
->mmubpregs
[reg
];
626 case 1: /* Breakpoint Mask */
627 ret
= env
->mmubpregs
[reg
];
629 case 2: /* Breakpoint Control */
630 ret
= env
->mmubpregs
[reg
];
632 case 3: /* Breakpoint Status */
633 ret
= env
->mmubpregs
[reg
];
634 env
->mmubpregs
[reg
] = 0ULL;
637 DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64
"\n", reg
,
641 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
642 ret
= env
->mmubpctrv
;
644 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
645 ret
= env
->mmubpctrc
;
647 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
648 ret
= env
->mmubpctrs
;
650 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
651 ret
= env
->mmubpaction
;
653 case 8: /* User code access, XXX */
655 do_unassigned_access(addr
, 0, 0, asi
, size
);
675 dump_asi("read ", last_addr
, asi
, size
, ret
);
680 void helper_st_asi(target_ulong addr
, uint64_t val
, int asi
, int size
)
682 helper_check_align(addr
, size
- 1);
684 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
686 case 0x00: /* Leon3 Cache Control */
687 case 0x08: /* Leon3 Instruction Cache config */
688 case 0x0C: /* Leon3 Date Cache config */
689 if (env
->def
->features
& CPU_FEATURE_CACHE_CTRL
) {
690 leon3_cache_control_st(addr
, val
, size
);
694 case 0x01c00000: /* MXCC stream data register 0 */
696 env
->mxccdata
[0] = val
;
698 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
702 case 0x01c00008: /* MXCC stream data register 1 */
704 env
->mxccdata
[1] = val
;
706 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
710 case 0x01c00010: /* MXCC stream data register 2 */
712 env
->mxccdata
[2] = val
;
714 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
718 case 0x01c00018: /* MXCC stream data register 3 */
720 env
->mxccdata
[3] = val
;
722 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
726 case 0x01c00100: /* MXCC stream source */
728 env
->mxccregs
[0] = val
;
730 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
733 env
->mxccdata
[0] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
735 env
->mxccdata
[1] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
737 env
->mxccdata
[2] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
739 env
->mxccdata
[3] = ldq_phys((env
->mxccregs
[0] & 0xffffffffULL
) +
742 case 0x01c00200: /* MXCC stream destination */
744 env
->mxccregs
[1] = val
;
746 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
749 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 0,
751 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 8,
753 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 16,
755 stq_phys((env
->mxccregs
[1] & 0xffffffffULL
) + 24,
758 case 0x01c00a00: /* MXCC control register */
760 env
->mxccregs
[3] = val
;
762 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
766 case 0x01c00a04: /* MXCC control register */
768 env
->mxccregs
[3] = (env
->mxccregs
[3] & 0xffffffff00000000ULL
)
771 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
775 case 0x01c00e00: /* MXCC error register */
776 /* writing a 1 bit clears the error */
778 env
->mxccregs
[6] &= ~val
;
780 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
784 case 0x01c00f00: /* MBus port address register */
786 env
->mxccregs
[7] = val
;
788 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr
,
793 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr
,
797 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64
"\n",
798 asi
, size
, addr
, val
);
803 case 3: /* MMU flush */
807 mmulev
= (addr
>> 8) & 15;
808 DPRINTF_MMU("mmu flush level %d\n", mmulev
);
810 case 0: /* flush page */
811 tlb_flush_page(env
, addr
& 0xfffff000);
813 case 1: /* flush segment (256k) */
814 case 2: /* flush region (16M) */
815 case 3: /* flush context (4G) */
816 case 4: /* flush entire */
823 dump_mmu(stdout
, fprintf
, env
);
827 case 4: /* write MMU regs */
829 int reg
= (addr
>> 8) & 0x1f;
832 oldreg
= env
->mmuregs
[reg
];
834 case 0: /* Control Register */
835 env
->mmuregs
[reg
] = (env
->mmuregs
[reg
] & 0xff000000) |
837 /* Mappings generated during no-fault mode or MMU
838 disabled mode are invalid in normal mode */
839 if ((oldreg
& (MMU_E
| MMU_NF
| env
->def
->mmu_bm
)) !=
840 (env
->mmuregs
[reg
] & (MMU_E
| MMU_NF
| env
->def
->mmu_bm
))) {
844 case 1: /* Context Table Pointer Register */
845 env
->mmuregs
[reg
] = val
& env
->def
->mmu_ctpr_mask
;
847 case 2: /* Context Register */
848 env
->mmuregs
[reg
] = val
& env
->def
->mmu_cxr_mask
;
849 if (oldreg
!= env
->mmuregs
[reg
]) {
850 /* we flush when the MMU context changes because
851 QEMU has no MMU context support */
855 case 3: /* Synchronous Fault Status Register with Clear */
856 case 4: /* Synchronous Fault Address Register */
858 case 0x10: /* TLB Replacement Control Register */
859 env
->mmuregs
[reg
] = val
& env
->def
->mmu_trcr_mask
;
861 case 0x13: /* Synchronous Fault Status Register with Read
863 env
->mmuregs
[3] = val
& env
->def
->mmu_sfsr_mask
;
865 case 0x14: /* Synchronous Fault Address Register */
866 env
->mmuregs
[4] = val
;
869 env
->mmuregs
[reg
] = val
;
872 if (oldreg
!= env
->mmuregs
[reg
]) {
873 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
874 reg
, oldreg
, env
->mmuregs
[reg
]);
877 dump_mmu(stdout
, fprintf
, env
);
881 case 5: /* Turbosparc ITLB Diagnostic */
882 case 6: /* Turbosparc DTLB Diagnostic */
883 case 7: /* Turbosparc IOTLB Diagnostic */
885 case 0xa: /* User data access */
902 case 0xb: /* Supervisor data access */
905 stb_kernel(addr
, val
);
908 stw_kernel(addr
, val
);
912 stl_kernel(addr
, val
);
915 stq_kernel(addr
, val
);
919 case 0xc: /* I-cache tag */
920 case 0xd: /* I-cache data */
921 case 0xe: /* D-cache tag */
922 case 0xf: /* D-cache data */
923 case 0x10: /* I/D-cache flush page */
924 case 0x11: /* I/D-cache flush segment */
925 case 0x12: /* I/D-cache flush region */
926 case 0x13: /* I/D-cache flush context */
927 case 0x14: /* I/D-cache flush user */
929 case 0x17: /* Block copy, sta access */
935 uint32_t src
= val
& ~3, dst
= addr
& ~3, temp
;
937 for (i
= 0; i
< 32; i
+= 4, src
+= 4, dst
+= 4) {
938 temp
= ldl_kernel(src
);
939 stl_kernel(dst
, temp
);
943 case 0x1f: /* Block fill, stda access */
946 fill 32 bytes with val */
948 uint32_t dst
= addr
& 7;
950 for (i
= 0; i
< 32; i
+= 8, dst
+= 8) {
951 stq_kernel(dst
, val
);
955 case 0x20: /* MMU passthrough */
974 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
978 stb_phys((target_phys_addr_t
)addr
979 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
982 stw_phys((target_phys_addr_t
)addr
983 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
987 stl_phys((target_phys_addr_t
)addr
988 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
991 stq_phys((target_phys_addr_t
)addr
992 | ((target_phys_addr_t
)(asi
& 0xf) << 32), val
);
997 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
998 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
999 Turbosparc snoop RAM */
1000 case 0x32: /* store buffer control or Turbosparc page table
1001 descriptor diagnostic */
1002 case 0x36: /* I-cache flash clear */
1003 case 0x37: /* D-cache flash clear */
1005 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
1007 int reg
= (addr
>> 8) & 3;
1010 case 0: /* Breakpoint Value (Addr) */
1011 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1013 case 1: /* Breakpoint Mask */
1014 env
->mmubpregs
[reg
] = (val
& 0xfffffffffULL
);
1016 case 2: /* Breakpoint Control */
1017 env
->mmubpregs
[reg
] = (val
& 0x7fULL
);
1019 case 3: /* Breakpoint Status */
1020 env
->mmubpregs
[reg
] = (val
& 0xfULL
);
1023 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg
,
1027 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1028 env
->mmubpctrv
= val
& 0xffffffff;
1030 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1031 env
->mmubpctrc
= val
& 0x3;
1033 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1034 env
->mmubpctrs
= val
& 0x3;
1036 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1037 env
->mmubpaction
= val
& 0x1fff;
1039 case 8: /* User code access, XXX */
1040 case 9: /* Supervisor code access, XXX */
1042 do_unassigned_access(addr
, 1, 0, asi
, size
);
1046 dump_asi("write", addr
, asi
, size
, val
);
1050 #endif /* CONFIG_USER_ONLY */
1051 #else /* TARGET_SPARC64 */
1053 #ifdef CONFIG_USER_ONLY
1054 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1057 #if defined(DEBUG_ASI)
1058 target_ulong last_addr
= addr
;
1062 helper_raise_exception(env
, TT_PRIV_ACT
);
1065 helper_check_align(addr
, size
- 1);
1066 addr
= asi_address_mask(env
, asi
, addr
);
1069 case 0x82: /* Primary no-fault */
1070 case 0x8a: /* Primary no-fault LE */
1071 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1073 dump_asi("read ", last_addr
, asi
, size
, ret
);
1078 case 0x80: /* Primary */
1079 case 0x88: /* Primary LE */
1083 ret
= ldub_raw(addr
);
1086 ret
= lduw_raw(addr
);
1089 ret
= ldl_raw(addr
);
1093 ret
= ldq_raw(addr
);
1098 case 0x83: /* Secondary no-fault */
1099 case 0x8b: /* Secondary no-fault LE */
1100 if (page_check_range(addr
, size
, PAGE_READ
) == -1) {
1102 dump_asi("read ", last_addr
, asi
, size
, ret
);
1107 case 0x81: /* Secondary */
1108 case 0x89: /* Secondary LE */
1115 /* Convert from little endian */
1117 case 0x88: /* Primary LE */
1118 case 0x89: /* Secondary LE */
1119 case 0x8a: /* Primary no-fault LE */
1120 case 0x8b: /* Secondary no-fault LE */
1138 /* Convert to signed number */
1145 ret
= (int16_t) ret
;
1148 ret
= (int32_t) ret
;
1155 dump_asi("read ", last_addr
, asi
, size
, ret
);
1160 void helper_st_asi(target_ulong addr
, target_ulong val
, int asi
, int size
)
1163 dump_asi("write", addr
, asi
, size
, val
);
1166 helper_raise_exception(env
, TT_PRIV_ACT
);
1169 helper_check_align(addr
, size
- 1);
1170 addr
= asi_address_mask(env
, asi
, addr
);
1172 /* Convert to little endian */
1174 case 0x88: /* Primary LE */
1175 case 0x89: /* Secondary LE */
1194 case 0x80: /* Primary */
1195 case 0x88: /* Primary LE */
1214 case 0x81: /* Secondary */
1215 case 0x89: /* Secondary LE */
1219 case 0x82: /* Primary no-fault, RO */
1220 case 0x83: /* Secondary no-fault, RO */
1221 case 0x8a: /* Primary no-fault LE, RO */
1222 case 0x8b: /* Secondary no-fault LE, RO */
1224 do_unassigned_access(addr
, 1, 0, 1, size
);
1229 #else /* CONFIG_USER_ONLY */
1231 uint64_t helper_ld_asi(target_ulong addr
, int asi
, int size
, int sign
)
1234 #if defined(DEBUG_ASI)
1235 target_ulong last_addr
= addr
;
1240 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
1241 || (cpu_has_hypervisor(env
)
1242 && asi
>= 0x30 && asi
< 0x80
1243 && !(env
->hpstate
& HS_PRIV
))) {
1244 helper_raise_exception(env
, TT_PRIV_ACT
);
1247 helper_check_align(addr
, size
- 1);
1248 addr
= asi_address_mask(env
, asi
, addr
);
1250 /* process nonfaulting loads first */
1251 if ((asi
& 0xf6) == 0x82) {
1254 /* secondary space access has lowest asi bit equal to 1 */
1255 if (env
->pstate
& PS_PRIV
) {
1256 mmu_idx
= (asi
& 1) ? MMU_KERNEL_SECONDARY_IDX
: MMU_KERNEL_IDX
;
1258 mmu_idx
= (asi
& 1) ? MMU_USER_SECONDARY_IDX
: MMU_USER_IDX
;
1261 if (cpu_get_phys_page_nofault(env
, addr
, mmu_idx
) == -1ULL) {
1263 dump_asi("read ", last_addr
, asi
, size
, ret
);
1265 /* env->exception_index is set in get_physical_address_data(). */
1266 helper_raise_exception(env
, env
->exception_index
);
1269 /* convert nonfaulting load ASIs to normal load ASIs */
1274 case 0x10: /* As if user primary */
1275 case 0x11: /* As if user secondary */
1276 case 0x18: /* As if user primary LE */
1277 case 0x19: /* As if user secondary LE */
1278 case 0x80: /* Primary */
1279 case 0x81: /* Secondary */
1280 case 0x88: /* Primary LE */
1281 case 0x89: /* Secondary LE */
1282 case 0xe2: /* UA2007 Primary block init */
1283 case 0xe3: /* UA2007 Secondary block init */
1284 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
1285 if (cpu_hypervisor_mode(env
)) {
1288 ret
= ldub_hypv(addr
);
1291 ret
= lduw_hypv(addr
);
1294 ret
= ldl_hypv(addr
);
1298 ret
= ldq_hypv(addr
);
1302 /* secondary space access has lowest asi bit equal to 1 */
1306 ret
= ldub_kernel_secondary(addr
);
1309 ret
= lduw_kernel_secondary(addr
);
1312 ret
= ldl_kernel_secondary(addr
);
1316 ret
= ldq_kernel_secondary(addr
);
1322 ret
= ldub_kernel(addr
);
1325 ret
= lduw_kernel(addr
);
1328 ret
= ldl_kernel(addr
);
1332 ret
= ldq_kernel(addr
);
1338 /* secondary space access has lowest asi bit equal to 1 */
1342 ret
= ldub_user_secondary(addr
);
1345 ret
= lduw_user_secondary(addr
);
1348 ret
= ldl_user_secondary(addr
);
1352 ret
= ldq_user_secondary(addr
);
1358 ret
= ldub_user(addr
);
1361 ret
= lduw_user(addr
);
1364 ret
= ldl_user(addr
);
1368 ret
= ldq_user(addr
);
1374 case 0x14: /* Bypass */
1375 case 0x15: /* Bypass, non-cacheable */
1376 case 0x1c: /* Bypass LE */
1377 case 0x1d: /* Bypass, non-cacheable LE */
1381 ret
= ldub_phys(addr
);
1384 ret
= lduw_phys(addr
);
1387 ret
= ldl_phys(addr
);
1391 ret
= ldq_phys(addr
);
1396 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1397 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1398 Only ldda allowed */
1399 helper_raise_exception(env
, TT_ILL_INSN
);
1401 case 0x04: /* Nucleus */
1402 case 0x0c: /* Nucleus Little Endian (LE) */
1406 ret
= ldub_nucleus(addr
);
1409 ret
= lduw_nucleus(addr
);
1412 ret
= ldl_nucleus(addr
);
1416 ret
= ldq_nucleus(addr
);
1421 case 0x4a: /* UPA config */
1424 case 0x45: /* LSU */
1427 case 0x50: /* I-MMU regs */
1429 int reg
= (addr
>> 3) & 0xf;
1432 /* I-TSB Tag Target register */
1433 ret
= ultrasparc_tag_target(env
->immu
.tag_access
);
1435 ret
= env
->immuregs
[reg
];
1440 case 0x51: /* I-MMU 8k TSB pointer */
1442 /* env->immuregs[5] holds I-MMU TSB register value
1443 env->immuregs[6] holds I-MMU Tag Access register value */
1444 ret
= ultrasparc_tsb_pointer(env
->immu
.tsb
, env
->immu
.tag_access
,
1448 case 0x52: /* I-MMU 64k TSB pointer */
1450 /* env->immuregs[5] holds I-MMU TSB register value
1451 env->immuregs[6] holds I-MMU Tag Access register value */
1452 ret
= ultrasparc_tsb_pointer(env
->immu
.tsb
, env
->immu
.tag_access
,
1456 case 0x55: /* I-MMU data access */
1458 int reg
= (addr
>> 3) & 0x3f;
1460 ret
= env
->itlb
[reg
].tte
;
1463 case 0x56: /* I-MMU tag read */
1465 int reg
= (addr
>> 3) & 0x3f;
1467 ret
= env
->itlb
[reg
].tag
;
1470 case 0x58: /* D-MMU regs */
1472 int reg
= (addr
>> 3) & 0xf;
1475 /* D-TSB Tag Target register */
1476 ret
= ultrasparc_tag_target(env
->dmmu
.tag_access
);
1478 ret
= env
->dmmuregs
[reg
];
1482 case 0x59: /* D-MMU 8k TSB pointer */
1484 /* env->dmmuregs[5] holds D-MMU TSB register value
1485 env->dmmuregs[6] holds D-MMU Tag Access register value */
1486 ret
= ultrasparc_tsb_pointer(env
->dmmu
.tsb
, env
->dmmu
.tag_access
,
1490 case 0x5a: /* D-MMU 64k TSB pointer */
1492 /* env->dmmuregs[5] holds D-MMU TSB register value
1493 env->dmmuregs[6] holds D-MMU Tag Access register value */
1494 ret
= ultrasparc_tsb_pointer(env
->dmmu
.tsb
, env
->dmmu
.tag_access
,
1498 case 0x5d: /* D-MMU data access */
1500 int reg
= (addr
>> 3) & 0x3f;
1502 ret
= env
->dtlb
[reg
].tte
;
1505 case 0x5e: /* D-MMU tag read */
1507 int reg
= (addr
>> 3) & 0x3f;
1509 ret
= env
->dtlb
[reg
].tag
;
1512 case 0x46: /* D-cache data */
1513 case 0x47: /* D-cache tag access */
1514 case 0x4b: /* E-cache error enable */
1515 case 0x4c: /* E-cache asynchronous fault status */
1516 case 0x4d: /* E-cache asynchronous fault address */
1517 case 0x4e: /* E-cache tag data */
1518 case 0x66: /* I-cache instruction access */
1519 case 0x67: /* I-cache tag access */
1520 case 0x6e: /* I-cache predecode */
1521 case 0x6f: /* I-cache LRU etc. */
1522 case 0x76: /* E-cache tag */
1523 case 0x7e: /* E-cache tag */
1525 case 0x5b: /* D-MMU data pointer */
1526 case 0x48: /* Interrupt dispatch, RO */
1527 case 0x49: /* Interrupt data receive */
1528 case 0x7f: /* Incoming interrupt vector, RO */
1531 case 0x54: /* I-MMU data in, WO */
1532 case 0x57: /* I-MMU demap, WO */
1533 case 0x5c: /* D-MMU data in, WO */
1534 case 0x5f: /* D-MMU demap, WO */
1535 case 0x77: /* Interrupt vector, WO */
1537 do_unassigned_access(addr
, 0, 0, 1, size
);
1542 /* Convert from little endian */
1544 case 0x0c: /* Nucleus Little Endian (LE) */
1545 case 0x18: /* As if user primary LE */
1546 case 0x19: /* As if user secondary LE */
1547 case 0x1c: /* Bypass LE */
1548 case 0x1d: /* Bypass, non-cacheable LE */
1549 case 0x88: /* Primary LE */
1550 case 0x89: /* Secondary LE */
1568 /* Convert to signed number */
1575 ret
= (int16_t) ret
;
1578 ret
= (int32_t) ret
;
1585 dump_asi("read ", last_addr
, asi
, size
, ret
);
1590 void helper_st_asi(target_ulong addr
, target_ulong val
, int asi
, int size
)
1593 dump_asi("write", addr
, asi
, size
, val
);
1598 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
1599 || (cpu_has_hypervisor(env
)
1600 && asi
>= 0x30 && asi
< 0x80
1601 && !(env
->hpstate
& HS_PRIV
))) {
1602 helper_raise_exception(env
, TT_PRIV_ACT
);
1605 helper_check_align(addr
, size
- 1);
1606 addr
= asi_address_mask(env
, asi
, addr
);
1608 /* Convert to little endian */
1610 case 0x0c: /* Nucleus Little Endian (LE) */
1611 case 0x18: /* As if user primary LE */
1612 case 0x19: /* As if user secondary LE */
1613 case 0x1c: /* Bypass LE */
1614 case 0x1d: /* Bypass, non-cacheable LE */
1615 case 0x88: /* Primary LE */
1616 case 0x89: /* Secondary LE */
1635 case 0x10: /* As if user primary */
1636 case 0x11: /* As if user secondary */
1637 case 0x18: /* As if user primary LE */
1638 case 0x19: /* As if user secondary LE */
1639 case 0x80: /* Primary */
1640 case 0x81: /* Secondary */
1641 case 0x88: /* Primary LE */
1642 case 0x89: /* Secondary LE */
1643 case 0xe2: /* UA2007 Primary block init */
1644 case 0xe3: /* UA2007 Secondary block init */
1645 if ((asi
& 0x80) && (env
->pstate
& PS_PRIV
)) {
1646 if (cpu_hypervisor_mode(env
)) {
1649 stb_hypv(addr
, val
);
1652 stw_hypv(addr
, val
);
1655 stl_hypv(addr
, val
);
1659 stq_hypv(addr
, val
);
1663 /* secondary space access has lowest asi bit equal to 1 */
1667 stb_kernel_secondary(addr
, val
);
1670 stw_kernel_secondary(addr
, val
);
1673 stl_kernel_secondary(addr
, val
);
1677 stq_kernel_secondary(addr
, val
);
1683 stb_kernel(addr
, val
);
1686 stw_kernel(addr
, val
);
1689 stl_kernel(addr
, val
);
1693 stq_kernel(addr
, val
);
1699 /* secondary space access has lowest asi bit equal to 1 */
1703 stb_user_secondary(addr
, val
);
1706 stw_user_secondary(addr
, val
);
1709 stl_user_secondary(addr
, val
);
1713 stq_user_secondary(addr
, val
);
1719 stb_user(addr
, val
);
1722 stw_user(addr
, val
);
1725 stl_user(addr
, val
);
1729 stq_user(addr
, val
);
1735 case 0x14: /* Bypass */
1736 case 0x15: /* Bypass, non-cacheable */
1737 case 0x1c: /* Bypass LE */
1738 case 0x1d: /* Bypass, non-cacheable LE */
1742 stb_phys(addr
, val
);
1745 stw_phys(addr
, val
);
1748 stl_phys(addr
, val
);
1752 stq_phys(addr
, val
);
1757 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1758 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE
1759 Only ldda allowed */
1760 helper_raise_exception(env
, TT_ILL_INSN
);
1762 case 0x04: /* Nucleus */
1763 case 0x0c: /* Nucleus Little Endian (LE) */
1767 stb_nucleus(addr
, val
);
1770 stw_nucleus(addr
, val
);
1773 stl_nucleus(addr
, val
);
1777 stq_nucleus(addr
, val
);
1783 case 0x4a: /* UPA config */
1786 case 0x45: /* LSU */
1791 env
->lsu
= val
& (DMMU_E
| IMMU_E
);
1792 /* Mappings generated during D/I MMU disabled mode are
1793 invalid in normal mode */
1794 if (oldreg
!= env
->lsu
) {
1795 DPRINTF_MMU("LSU change: 0x%" PRIx64
" -> 0x%" PRIx64
"\n",
1798 dump_mmu(stdout
, fprintf
, env1
);
1804 case 0x50: /* I-MMU regs */
1806 int reg
= (addr
>> 3) & 0xf;
1809 oldreg
= env
->immuregs
[reg
];
1813 case 1: /* Not in I-MMU */
1817 if ((val
& 1) == 0) {
1818 val
= 0; /* Clear SFSR */
1820 env
->immu
.sfsr
= val
;
1824 case 5: /* TSB access */
1825 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64
" -> 0x%016"
1826 PRIx64
"\n", env
->immu
.tsb
, val
);
1827 env
->immu
.tsb
= val
;
1829 case 6: /* Tag access */
1830 env
->immu
.tag_access
= val
;
1839 if (oldreg
!= env
->immuregs
[reg
]) {
1840 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64
" -> 0x%016"
1841 PRIx64
"\n", reg
, oldreg
, env
->immuregs
[reg
]);
1844 dump_mmu(stdout
, fprintf
, env
);
1848 case 0x54: /* I-MMU data in */
1849 replace_tlb_1bit_lru(env
->itlb
, env
->immu
.tag_access
, val
, "immu", env
);
1851 case 0x55: /* I-MMU data access */
1853 /* TODO: auto demap */
1855 unsigned int i
= (addr
>> 3) & 0x3f;
1857 replace_tlb_entry(&env
->itlb
[i
], env
->immu
.tag_access
, val
, env
);
1860 DPRINTF_MMU("immu data access replaced entry [%i]\n", i
);
1861 dump_mmu(stdout
, fprintf
, env
);
1865 case 0x57: /* I-MMU demap */
1866 demap_tlb(env
->itlb
, addr
, "immu", env
);
1868 case 0x58: /* D-MMU regs */
1870 int reg
= (addr
>> 3) & 0xf;
1873 oldreg
= env
->dmmuregs
[reg
];
1879 if ((val
& 1) == 0) {
1880 val
= 0; /* Clear SFSR, Fault address */
1883 env
->dmmu
.sfsr
= val
;
1885 case 1: /* Primary context */
1886 env
->dmmu
.mmu_primary_context
= val
;
1887 /* can be optimized to only flush MMU_USER_IDX
1888 and MMU_KERNEL_IDX entries */
1891 case 2: /* Secondary context */
1892 env
->dmmu
.mmu_secondary_context
= val
;
1893 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1894 and MMU_KERNEL_SECONDARY_IDX entries */
1897 case 5: /* TSB access */
1898 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64
" -> 0x%016"
1899 PRIx64
"\n", env
->dmmu
.tsb
, val
);
1900 env
->dmmu
.tsb
= val
;
1902 case 6: /* Tag access */
1903 env
->dmmu
.tag_access
= val
;
1905 case 7: /* Virtual Watchpoint */
1906 case 8: /* Physical Watchpoint */
1908 env
->dmmuregs
[reg
] = val
;
1912 if (oldreg
!= env
->dmmuregs
[reg
]) {
1913 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64
" -> 0x%016"
1914 PRIx64
"\n", reg
, oldreg
, env
->dmmuregs
[reg
]);
1917 dump_mmu(stdout
, fprintf
, env
);
1921 case 0x5c: /* D-MMU data in */
1922 replace_tlb_1bit_lru(env
->dtlb
, env
->dmmu
.tag_access
, val
, "dmmu", env
);
1924 case 0x5d: /* D-MMU data access */
1926 unsigned int i
= (addr
>> 3) & 0x3f;
1928 replace_tlb_entry(&env
->dtlb
[i
], env
->dmmu
.tag_access
, val
, env
);
1931 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i
);
1932 dump_mmu(stdout
, fprintf
, env
);
1936 case 0x5f: /* D-MMU demap */
1937 demap_tlb(env
->dtlb
, addr
, "dmmu", env
);
1939 case 0x49: /* Interrupt data receive */
1942 case 0x46: /* D-cache data */
1943 case 0x47: /* D-cache tag access */
1944 case 0x4b: /* E-cache error enable */
1945 case 0x4c: /* E-cache asynchronous fault status */
1946 case 0x4d: /* E-cache asynchronous fault address */
1947 case 0x4e: /* E-cache tag data */
1948 case 0x66: /* I-cache instruction access */
1949 case 0x67: /* I-cache tag access */
1950 case 0x6e: /* I-cache predecode */
1951 case 0x6f: /* I-cache LRU etc. */
1952 case 0x76: /* E-cache tag */
1953 case 0x7e: /* E-cache tag */
1955 case 0x51: /* I-MMU 8k TSB pointer, RO */
1956 case 0x52: /* I-MMU 64k TSB pointer, RO */
1957 case 0x56: /* I-MMU tag read, RO */
1958 case 0x59: /* D-MMU 8k TSB pointer, RO */
1959 case 0x5a: /* D-MMU 64k TSB pointer, RO */
1960 case 0x5b: /* D-MMU data pointer, RO */
1961 case 0x5e: /* D-MMU tag read, RO */
1962 case 0x48: /* Interrupt dispatch, RO */
1963 case 0x7f: /* Incoming interrupt vector, RO */
1964 case 0x82: /* Primary no-fault, RO */
1965 case 0x83: /* Secondary no-fault, RO */
1966 case 0x8a: /* Primary no-fault LE, RO */
1967 case 0x8b: /* Secondary no-fault LE, RO */
1969 do_unassigned_access(addr
, 1, 0, 1, size
);
1973 #endif /* CONFIG_USER_ONLY */
1975 void helper_ldda_asi(target_ulong addr
, int asi
, int rd
)
1977 if ((asi
< 0x80 && (env
->pstate
& PS_PRIV
) == 0)
1978 || (cpu_has_hypervisor(env
)
1979 && asi
>= 0x30 && asi
< 0x80
1980 && !(env
->hpstate
& HS_PRIV
))) {
1981 helper_raise_exception(env
, TT_PRIV_ACT
);
1984 addr
= asi_address_mask(env
, asi
, addr
);
1987 #if !defined(CONFIG_USER_ONLY)
1988 case 0x24: /* Nucleus quad LDD 128 bit atomic */
1989 case 0x2c: /* Nucleus quad LDD 128 bit atomic LE */
1990 helper_check_align(addr
, 0xf);
1992 env
->gregs
[1] = ldq_nucleus(addr
+ 8);
1994 bswap64s(&env
->gregs
[1]);
1996 } else if (rd
< 8) {
1997 env
->gregs
[rd
] = ldq_nucleus(addr
);
1998 env
->gregs
[rd
+ 1] = ldq_nucleus(addr
+ 8);
2000 bswap64s(&env
->gregs
[rd
]);
2001 bswap64s(&env
->gregs
[rd
+ 1]);
2004 env
->regwptr
[rd
] = ldq_nucleus(addr
);
2005 env
->regwptr
[rd
+ 1] = ldq_nucleus(addr
+ 8);
2007 bswap64s(&env
->regwptr
[rd
]);
2008 bswap64s(&env
->regwptr
[rd
+ 1]);
2014 helper_check_align(addr
, 0x3);
2016 env
->gregs
[1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2017 } else if (rd
< 8) {
2018 env
->gregs
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2019 env
->gregs
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2021 env
->regwptr
[rd
] = helper_ld_asi(addr
, asi
, 4, 0);
2022 env
->regwptr
[rd
+ 1] = helper_ld_asi(addr
+ 4, asi
, 4, 0);
2028 void helper_ldf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2033 helper_check_align(addr
, 3);
2034 addr
= asi_address_mask(env
, asi
, addr
);
2037 case 0xf0: /* UA2007/JPS1 Block load primary */
2038 case 0xf1: /* UA2007/JPS1 Block load secondary */
2039 case 0xf8: /* UA2007/JPS1 Block load primary LE */
2040 case 0xf9: /* UA2007/JPS1 Block load secondary LE */
2042 helper_raise_exception(env
, TT_ILL_INSN
);
2045 helper_check_align(addr
, 0x3f);
2046 for (i
= 0; i
< 16; i
++) {
2047 *(uint32_t *)&env
->fpr
[rd
++] = helper_ld_asi(addr
, asi
& 0x8f, 4,
2053 case 0x16: /* UA2007 Block load primary, user privilege */
2054 case 0x17: /* UA2007 Block load secondary, user privilege */
2055 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2056 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2057 case 0x70: /* JPS1 Block load primary, user privilege */
2058 case 0x71: /* JPS1 Block load secondary, user privilege */
2059 case 0x78: /* JPS1 Block load primary LE, user privilege */
2060 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2062 helper_raise_exception(env
, TT_ILL_INSN
);
2065 helper_check_align(addr
, 0x3f);
2066 for (i
= 0; i
< 16; i
++) {
2067 *(uint32_t *)&env
->fpr
[rd
++] = helper_ld_asi(addr
, asi
& 0x19, 4,
2080 *((uint32_t *)&env
->fpr
[rd
]) = helper_ld_asi(addr
, asi
, size
, 0);
2083 u
.ll
= helper_ld_asi(addr
, asi
, size
, 0);
2084 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2085 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2088 u
.ll
= helper_ld_asi(addr
, asi
, 8, 0);
2089 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2090 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2091 u
.ll
= helper_ld_asi(addr
+ 8, asi
, 8, 0);
2092 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.upper
;
2093 *((uint32_t *)&env
->fpr
[rd
++]) = u
.l
.lower
;
2098 void helper_stf_asi(target_ulong addr
, int asi
, int size
, int rd
)
2101 target_ulong val
= 0;
2104 helper_check_align(addr
, 3);
2105 addr
= asi_address_mask(env
, asi
, addr
);
2108 case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
2109 case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
2110 case 0xf0: /* UA2007/JPS1 Block store primary */
2111 case 0xf1: /* UA2007/JPS1 Block store secondary */
2112 case 0xf8: /* UA2007/JPS1 Block store primary LE */
2113 case 0xf9: /* UA2007/JPS1 Block store secondary LE */
2115 helper_raise_exception(env
, TT_ILL_INSN
);
2118 helper_check_align(addr
, 0x3f);
2119 for (i
= 0; i
< 16; i
++) {
2120 val
= *(uint32_t *)&env
->fpr
[rd
++];
2121 helper_st_asi(addr
, val
, asi
& 0x8f, 4);
2126 case 0x16: /* UA2007 Block load primary, user privilege */
2127 case 0x17: /* UA2007 Block load secondary, user privilege */
2128 case 0x1e: /* UA2007 Block load primary LE, user privilege */
2129 case 0x1f: /* UA2007 Block load secondary LE, user privilege */
2130 case 0x70: /* JPS1 Block store primary, user privilege */
2131 case 0x71: /* JPS1 Block store secondary, user privilege */
2132 case 0x78: /* JPS1 Block load primary LE, user privilege */
2133 case 0x79: /* JPS1 Block load secondary LE, user privilege */
2135 helper_raise_exception(env
, TT_ILL_INSN
);
2138 helper_check_align(addr
, 0x3f);
2139 for (i
= 0; i
< 16; i
++) {
2140 val
= *(uint32_t *)&env
->fpr
[rd
++];
2141 helper_st_asi(addr
, val
, asi
& 0x19, 4);
2153 helper_st_asi(addr
, *(uint32_t *)&env
->fpr
[rd
], asi
, size
);
2156 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2157 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2158 helper_st_asi(addr
, u
.ll
, asi
, size
);
2161 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2162 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2163 helper_st_asi(addr
, u
.ll
, asi
, 8);
2164 u
.l
.upper
= *(uint32_t *)&env
->fpr
[rd
++];
2165 u
.l
.lower
= *(uint32_t *)&env
->fpr
[rd
++];
2166 helper_st_asi(addr
+ 8, u
.ll
, asi
, 8);
2171 target_ulong
helper_cas_asi(target_ulong addr
, target_ulong val1
,
2172 target_ulong val2
, uint32_t asi
)
2176 val2
&= 0xffffffffUL
;
2177 ret
= helper_ld_asi(addr
, asi
, 4, 0);
2178 ret
&= 0xffffffffUL
;
2180 helper_st_asi(addr
, val1
& 0xffffffffUL
, asi
, 4);
2185 target_ulong
helper_casx_asi(target_ulong addr
, target_ulong val1
,
2186 target_ulong val2
, uint32_t asi
)
2190 ret
= helper_ld_asi(addr
, asi
, 8, 0);
2192 helper_st_asi(addr
, val1
, asi
, 8);
2196 #endif /* TARGET_SPARC64 */
2198 void helper_stdf(target_ulong addr
, int mem_idx
)
2200 helper_check_align(addr
, 7);
2201 #if !defined(CONFIG_USER_ONLY)
2204 stfq_user(addr
, DT0
);
2206 case MMU_KERNEL_IDX
:
2207 stfq_kernel(addr
, DT0
);
2209 #ifdef TARGET_SPARC64
2211 stfq_hypv(addr
, DT0
);
2215 DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx
);
2219 stfq_raw(address_mask(env
, addr
), DT0
);
2223 void helper_lddf(target_ulong addr
, int mem_idx
)
2225 helper_check_align(addr
, 7);
2226 #if !defined(CONFIG_USER_ONLY)
2229 DT0
= ldfq_user(addr
);
2231 case MMU_KERNEL_IDX
:
2232 DT0
= ldfq_kernel(addr
);
2234 #ifdef TARGET_SPARC64
2236 DT0
= ldfq_hypv(addr
);
2240 DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx
);
2244 DT0
= ldfq_raw(address_mask(env
, addr
));
2248 void helper_ldqf(target_ulong addr
, int mem_idx
)
2250 /* XXX add 128 bit load */
2253 helper_check_align(addr
, 7);
2254 #if !defined(CONFIG_USER_ONLY)
2257 u
.ll
.upper
= ldq_user(addr
);
2258 u
.ll
.lower
= ldq_user(addr
+ 8);
2261 case MMU_KERNEL_IDX
:
2262 u
.ll
.upper
= ldq_kernel(addr
);
2263 u
.ll
.lower
= ldq_kernel(addr
+ 8);
2266 #ifdef TARGET_SPARC64
2268 u
.ll
.upper
= ldq_hypv(addr
);
2269 u
.ll
.lower
= ldq_hypv(addr
+ 8);
2274 DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx
);
2278 u
.ll
.upper
= ldq_raw(address_mask(env
, addr
));
2279 u
.ll
.lower
= ldq_raw(address_mask(env
, addr
+ 8));
2284 void helper_stqf(target_ulong addr
, int mem_idx
)
2286 /* XXX add 128 bit store */
2289 helper_check_align(addr
, 7);
2290 #if !defined(CONFIG_USER_ONLY)
2294 stq_user(addr
, u
.ll
.upper
);
2295 stq_user(addr
+ 8, u
.ll
.lower
);
2297 case MMU_KERNEL_IDX
:
2299 stq_kernel(addr
, u
.ll
.upper
);
2300 stq_kernel(addr
+ 8, u
.ll
.lower
);
2302 #ifdef TARGET_SPARC64
2305 stq_hypv(addr
, u
.ll
.upper
);
2306 stq_hypv(addr
+ 8, u
.ll
.lower
);
2310 DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx
);
2315 stq_raw(address_mask(env
, addr
), u
.ll
.upper
);
2316 stq_raw(address_mask(env
, addr
+ 8), u
.ll
.lower
);
2320 #if !defined(CONFIG_USER_ONLY)
2322 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
2325 #define MMUSUFFIX _mmu
2326 #define ALIGNED_ONLY
2329 #include "softmmu_template.h"
2332 #include "softmmu_template.h"
2335 #include "softmmu_template.h"
2338 #include "softmmu_template.h"
2340 /* XXX: make it generic ? */
2341 static void cpu_restore_state2(void *retaddr
)
2343 TranslationBlock
*tb
;
2347 /* now we have a real cpu fault */
2348 pc
= (unsigned long)retaddr
;
2349 tb
= tb_find_pc(pc
);
2351 /* the PC is inside the translated code. It means that we have
2352 a virtual CPU fault */
2353 cpu_restore_state(tb
, env
, pc
);
2358 static void do_unaligned_access(target_ulong addr
, int is_write
, int is_user
,
2361 #ifdef DEBUG_UNALIGNED
2362 printf("Unaligned access to 0x" TARGET_FMT_lx
" from 0x" TARGET_FMT_lx
2363 "\n", addr
, env
->pc
);
2365 cpu_restore_state2(retaddr
);
2366 helper_raise_exception(env
, TT_UNALIGNED
);
2369 /* try to fill the TLB and return an exception if error. If retaddr is
2370 NULL, it means that the function was called in C code (i.e. not
2371 from generated code or from helper.c) */
2372 /* XXX: fix it to restore all registers */
2373 void tlb_fill(CPUState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
2377 CPUState
*saved_env
;
2382 ret
= cpu_sparc_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
2384 cpu_restore_state2(retaddr
);
2390 #endif /* !CONFIG_USER_ONLY */
2392 #ifndef TARGET_SPARC64
2393 #if !defined(CONFIG_USER_ONLY)
2394 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
2395 int is_exec
, int is_asi
, int size
)
2399 #ifdef DEBUG_UNASSIGNED
2401 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2402 " asi 0x%02x from " TARGET_FMT_lx
"\n",
2403 is_exec
? "exec" : is_write
? "write" : "read", size
,
2404 size
== 1 ? "" : "s", addr
, is_asi
, env
->pc
);
2406 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
2407 " from " TARGET_FMT_lx
"\n",
2408 is_exec
? "exec" : is_write
? "write" : "read", size
,
2409 size
== 1 ? "" : "s", addr
, env
->pc
);
2412 /* Don't overwrite translation and access faults */
2413 fault_type
= (env
->mmuregs
[3] & 0x1c) >> 2;
2414 if ((fault_type
> 4) || (fault_type
== 0)) {
2415 env
->mmuregs
[3] = 0; /* Fault status register */
2417 env
->mmuregs
[3] |= 1 << 16;
2420 env
->mmuregs
[3] |= 1 << 5;
2423 env
->mmuregs
[3] |= 1 << 6;
2426 env
->mmuregs
[3] |= 1 << 7;
2428 env
->mmuregs
[3] |= (5 << 2) | 2;
2429 /* SuperSPARC will never place instruction fault addresses in the FAR */
2431 env
->mmuregs
[4] = addr
; /* Fault address register */
2434 /* overflow (same type fault was not read before another fault) */
2435 if (fault_type
== ((env
->mmuregs
[3] & 0x1c)) >> 2) {
2436 env
->mmuregs
[3] |= 1;
2439 if ((env
->mmuregs
[0] & MMU_E
) && !(env
->mmuregs
[0] & MMU_NF
)) {
2441 helper_raise_exception(env
, TT_CODE_ACCESS
);
2443 helper_raise_exception(env
, TT_DATA_ACCESS
);
2447 /* flush neverland mappings created during no-fault mode,
2448 so the sequential MMU faults report proper fault types */
2449 if (env
->mmuregs
[0] & MMU_NF
) {
2455 #if defined(CONFIG_USER_ONLY)
2456 static void do_unassigned_access(target_ulong addr
, int is_write
, int is_exec
,
2457 int is_asi
, int size
)
2459 static void do_unassigned_access(target_phys_addr_t addr
, int is_write
,
2460 int is_exec
, int is_asi
, int size
)
2463 #ifdef DEBUG_UNASSIGNED
2464 printf("Unassigned mem access to " TARGET_FMT_plx
" from " TARGET_FMT_lx
2465 "\n", addr
, env
->pc
);
2469 helper_raise_exception(env
, TT_CODE_ACCESS
);
2471 helper_raise_exception(env
, TT_DATA_ACCESS
);
2476 #if !defined(CONFIG_USER_ONLY)
2477 void cpu_unassigned_access(CPUState
*env1
, target_phys_addr_t addr
,
2478 int is_write
, int is_exec
, int is_asi
, int size
)
2480 CPUState
*saved_env
;
2484 /* Ignore unassigned accesses outside of CPU context */
2486 do_unassigned_access(addr
, is_write
, is_exec
, is_asi
, size
);