/*
 * QEMU SPARC helpers — target-sparc/op_helper.c
 */
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #include "sysemu.h"
5
6 //#define DEBUG_MMU
7 //#define DEBUG_MXCC
8 //#define DEBUG_UNALIGNED
9 //#define DEBUG_UNASSIGNED
10 //#define DEBUG_ASI
11 //#define DEBUG_PCALL
12 //#define DEBUG_PSTATE
13 //#define DEBUG_CACHE_CONTROL
14
15 #ifdef DEBUG_MMU
16 #define DPRINTF_MMU(fmt, ...) \
17 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
18 #else
19 #define DPRINTF_MMU(fmt, ...) do {} while (0)
20 #endif
21
22 #ifdef DEBUG_MXCC
23 #define DPRINTF_MXCC(fmt, ...) \
24 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
25 #else
26 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
27 #endif
28
#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...)                                   \
    do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#else
/* Match the other DPRINTF_* macros above: expand to a no-op when
   DEBUG_ASI is off, so call sites do not have to be wrapped in
   #ifdef DEBUG_ASI to compile. */
#define DPRINTF_ASI(fmt, ...) do {} while (0)
#endif
33
34 #ifdef DEBUG_PSTATE
35 #define DPRINTF_PSTATE(fmt, ...) \
36 do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
37 #else
38 #define DPRINTF_PSTATE(fmt, ...) do {} while (0)
39 #endif
40
41 #ifdef DEBUG_CACHE_CONTROL
42 #define DPRINTF_CACHE_CONTROL(fmt, ...) \
43 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
44 #else
45 #define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
46 #endif
47
48 #ifdef TARGET_SPARC64
49 #ifndef TARGET_ABI32
50 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
51 #else
52 #define AM_CHECK(env1) (1)
53 #endif
54 #endif
55
56 #define DT0 (env->dt0)
57 #define DT1 (env->dt1)
58 #define QT0 (env->qt0)
59 #define QT1 (env->qt1)
60
61 /* Leon3 cache control */
62
63 /* Cache control: emulate the behavior of cache control registers but without
64 any effect on the emulated */
65
66 #define CACHE_STATE_MASK 0x3
67 #define CACHE_DISABLED 0x0
68 #define CACHE_FROZEN 0x1
69 #define CACHE_ENABLED 0x3
70
71 /* Cache Control register fields */
72
73 #define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */
74 #define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */
75 #define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */
76 #define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */
77 #define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */
78 #define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */
79 #define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
80 #define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
81
82 #if !defined(CONFIG_USER_ONLY)
83 static void do_unassigned_access(target_phys_addr_t addr, int is_write,
84 int is_exec, int is_asi, int size);
85 #else
86 #ifdef TARGET_SPARC64
87 static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
88 int is_asi, int size);
89 #endif
90 #endif
91
92 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
93 // Calculates TSB pointer value for fault page size 8k or 64k
// Calculates TSB pointer value for fault page size 8k or 64k
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    /* Decode the TSB register: base address, split bit and size field. */
    const uint64_t tsb_base = tsb_register & ~0x1fffULL;
    const int tsb_split = (tsb_register >> 12) & 1;
    const int tsb_size = tsb_register & 0xf;

    /* The low 13 bits of the tag access register hold the context,
       not VA bits — discard them. */
    uint64_t va = tag_access_register & ~0x1fffULL;
    uint64_t base_mask = ~0x1fffULL;

    /* Position the VA bits according to the faulting page size. */
    switch (page_size) {
    case 8 * 1024:
        va >>= 9;
        break;
    case 64 * 1024:
        va >>= 12;
        break;
    default:
        break;
    }

    /* A non-zero size field widens the base mask. */
    if (tsb_size) {
        base_mask <<= tsb_size;
    }

    /* With a split TSB, the 8k half clears and the 64k half sets the
       bit just above the indexed region; the base covers both halves. */
    if (tsb_split) {
        if (page_size == 8 * 1024) {
            va &= ~(1ULL << (13 + tsb_size));
        } else if (page_size == 64 * 1024) {
            va |= (1ULL << (13 + tsb_size));
        }
        base_mask <<= 1;
    }

    /* Combine base and index, 16-byte aligned. */
    return ((tsb_base & base_mask) | (va & ~base_mask)) & ~0xfULL;
}
132
133 // Calculates tag target register value by reordering bits
134 // in tag access register
// Calculates tag target register value by reordering bits
// in tag access register
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fff;
    uint64_t va_high = tag_access_register >> 22;

    /* Context moves to the top 16 bits, VA<63:22> to the low bits. */
    return (context << 48) | va_high;
}
139
/* Overwrite one software TLB entry with (tlb_tag, tlb_tte), flushing
   the QEMU TLB pages covered by the old mapping if it was valid. */
static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        /* TTE bits 62:61 give the page size; each step multiplies the
           8k base size by 8, so the mask shifts by 3 bits per step. */
        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);
        size = ~mask + 1;

        va = tlb->tag & mask;

        /* invalidate every target page of the old mapping */
        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}
163
/* Handle a demap operation on the 64-entry software TLB @tlb.
   Bit 6 of @demap_addr selects "demap context" vs "demap page";
   bits 5:4 select which context register the operation refers to. */
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    // demap context
    switch ((demap_addr >> 4) & 3) {
    case 0: // primary
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: // secondary
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: // nucleus
        context = 0;
        break;
    case 3: // reserved
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                // will remove non-global entries matching context value
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                // demap page
                // will remove any entry matching VA
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                // entry should be global or matching context value
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
        }
    }
}
223
/* Insert (tlb_tag, tlb_tte) into the 64-entry TLB using a 1-bit LRU
   policy: prefer an invalid slot, then an unlocked+unused slot, and
   only after clearing all used bits an unlocked (previously used) one. */
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(stdout, fprintf, env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(stdout, fprintf, env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state? (all 64 entries locked — entry silently dropped)
}
273
274 #endif
275
/* Mask off the upper 32 address bits when 32-bit address masking
   (PSTATE.AM, or always under TARGET_ABI32) is active on sparc64;
   a no-op on sparc32. */
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        addr &= 0xffffffffULL;
#endif
    return addr;
}
284
285 /* returns true if access using this ASI is to have address translated by MMU
286 otherwise access is to raw physical address */
287 static inline int is_translating_asi(int asi)
288 {
289 #ifdef TARGET_SPARC64
290 /* Ultrasparc IIi translating asi
291 - note this list is defined by cpu implementation
292 */
293 switch (asi) {
294 case 0x04 ... 0x11:
295 case 0x16 ... 0x19:
296 case 0x1E ... 0x1F:
297 case 0x24 ... 0x2C:
298 case 0x70 ... 0x73:
299 case 0x78 ... 0x79:
300 case 0x80 ... 0xFF:
301 return 1;
302
303 default:
304 return 0;
305 }
306 #else
307 /* TODO: check sparc32 bits */
308 return 0;
309 #endif
310 }
311
312 static inline target_ulong asi_address_mask(CPUState *env1,
313 int asi, target_ulong addr)
314 {
315 if (is_translating_asi(asi)) {
316 return address_mask(env, addr);
317 } else {
318 return addr;
319 }
320 }
321
/* Record trap type @tt and longjmp back to the main CPU loop. */
static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit(env);
}

/* TCG-visible wrapper around raise_exception(). */
void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}

/* Request a system shutdown; ignored in user-mode emulation. */
void helper_shutdown(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_system_shutdown_request();
#endif
}

/* Raise an unaligned-access trap if @addr has any of the @align low
   bits set (@align is the required alignment minus one). */
void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
        printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
               "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
350
/* Declare a helper "helper_f<name><p>" taking no TCG arguments;
   double/quad operands travel through env->dt0/dt1 and qt0/qt1. */
#define F_HELPER(name, p) void helper_f##name##p(void)

/* Generate single/double/quad variants of a binary FP operation
   implemented by softfloat's float{32,64,128}_<name>. */
#define F_BINOP(name)                                                   \
    float32 helper_f ## name ## s (float32 src1, float32 src2)          \
    {                                                                   \
        return float32_ ## name (src1, src2, &env->fp_status);          \
    }                                                                   \
    F_HELPER(name, d)                                                   \
    {                                                                   \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);             \
    }                                                                   \
    F_HELPER(name, q)                                                   \
    {                                                                   \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);            \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP
372
/* fsmuld: multiply two singles into a double (widen first, so the
   product is exact before the double rounding). */
void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

/* fdmulq: multiply two doubles into a quad (widen first). */
void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}

/* Negate a single by flipping the sign bit. */
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
/* Negate DT1 into DT0. */
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

/* Negate QT1 into QT0. */
F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif
403
/* Integer to float conversion. */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
/* 64-bit integer (passed as the raw bit pattern of DT1) to float. */
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER
437
/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion (SPARC f*toi truncates toward zero). */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
484
#ifdef TARGET_SPARC64
/* Float to 64-bit integer, truncating; result stored as the raw bit
   pattern of DT0. */
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

/* VIS faligndata: concatenate DT0:DT1 and extract 8 bytes starting at
   the byte offset held in GSR.align (low 3 bits of env->gsr). */
void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
512
/* Accessors for the sub-elements of a VIS value. Index n counts from
   the least significant element, regardless of host byte order. */
#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif

/* A 64-bit FP register viewed as bytes, (signed) 16-bit words,
   32-bit halves, one 64-bit integer, or one float64. */
typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    uint64_t ll;
    float64 d;
} vis64;

/* A 32-bit FP register viewed as bytes, 16-bit words, one 32-bit
   integer, or one float32. */
typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;
544
/* VIS fpmerge: interleave the low four bytes of DT0 (s) and DT1 (d)
   into the eight bytes of DT0. */
void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}
564
/* VIS fmul8x16: each 16-bit word of DT1 times the corresponding
   unsigned byte of DT0; PMUL rounds (adds 0x100 when the low byte of
   the product exceeds 0x7f) before keeping the high 16 bits. */
void helper_fmul8x16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmul8x16al: like fmul8x16, but every product uses word 1 of DT1. */
void helper_fmul8x16al(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* fmul8x16au: like fmul8x16, but every product uses word 0 of DT1. */
void helper_fmul8x16au(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
633
/* VIS fmul8sux16: multiply each word of DT1 by the signed upper byte
   (sw >> 8) of the corresponding word of DT0, rounded, keeping the
   high 16 bits. */
void helper_fmul8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* VIS fmul8ulx16: multiply each word of DT1 by the unsigned lower
   byte of the corresponding word of DT0 (byte r*2 in element order). */
void helper_fmul8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
679
/* VIS fmuld8sux16: like fmul8sux16 but only two products, each stored
   as a full 32-bit result. */
void helper_fmuld8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}

/* VIS fmuld8ulx16: like fmul8ulx16 but only two products, each stored
   as a full 32-bit result. */
void helper_fmuld8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}
723
/* VIS fexpand: widen the four bytes in the low half of DT0 to 16-bit
   words, each shifted left by 4. */
void helper_fexpand(void)
{
    vis32 s;
    vis64 d;

    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.d = DT1;
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
    d.VIS_W64(3) = s.VIS_B32(3) << 4;

    DT0 = d.d;
}
738
/* Generate the four partitioned-arithmetic helpers for operation F:
   name16   - 4x16-bit lanes on DT0/DT1
   name16s  - 2x16-bit lanes on 32-bit operands
   name32   - 2x32-bit lanes on DT0/DT1
   name32s  - one 32-bit lane
   F is applied lane-wise with ordinary (wrapping) integer arithmetic. */
#define VIS_HELPER(name, F)                                     \
    void name##16(void)                                         \
    {                                                           \
        vis64 s, d;                                             \
                                                                \
        s.d = DT0;                                              \
        d.d = DT1;                                              \
                                                                \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));           \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));           \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));           \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));           \
                                                                \
        DT0 = d.d;                                              \
    }                                                           \
                                                                \
    uint32_t name##16s(uint32_t src1, uint32_t src2)            \
    {                                                           \
        vis32 s, d;                                             \
                                                                \
        s.l = src1;                                             \
        d.l = src2;                                             \
                                                                \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));           \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));           \
                                                                \
        return d.l;                                             \
    }                                                           \
                                                                \
    void name##32(void)                                         \
    {                                                           \
        vis64 s, d;                                             \
                                                                \
        s.d = DT0;                                              \
        d.d = DT1;                                              \
                                                                \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));           \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));           \
                                                                \
        DT0 = d.d;                                              \
    }                                                           \
                                                                \
    uint32_t name##32s(uint32_t src1, uint32_t src2)            \
    {                                                           \
        vis32 s, d;                                             \
                                                                \
        s.l = src1;                                             \
        d.l = src2;                                             \
                                                                \
        d.l = F(d.l, s.l);                                      \
                                                                \
        return d.l;                                             \
    }

#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
797
/* Generate partitioned-compare helpers for predicate F:
   name16 compares four 16-bit lanes, name32 two 32-bit lanes; the
   result is a bitmask (one bit per lane) in the low bits, with the
   remaining lanes zeroed. */
#define VIS_CMPHELPER(name, F)                                        \
    uint64_t name##16(void)                                           \
    {                                                                 \
        vis64 s, d;                                                   \
                                                                      \
        s.d = DT0;                                                    \
        d.d = DT1;                                                    \
                                                                      \
        d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0;         \
        d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0;        \
        d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0;        \
        d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0;        \
        d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0;               \
                                                                      \
        return d.ll;                                                  \
    }                                                                 \
                                                                      \
    uint64_t name##32(void)                                           \
    {                                                                 \
        vis64 s, d;                                                   \
                                                                      \
        s.d = DT0;                                                    \
        d.d = DT1;                                                    \
                                                                      \
        d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0;         \
        d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0;        \
        d.VIS_L64(1) = 0;                                             \
                                                                      \
        return d.ll;                                                  \
    }

#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))

VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
#endif
839
/* Fold softfloat's accumulated IEEE exception flags into the FSR.
   An exception unmasked by FSR.TEM raises an fp_exception trap;
   otherwise the current-exception bits accumulate into FSR.AEXC. */
void helper_check_ieee_exceptions(void)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid)
            env->fsr |= FSR_NVC;
        if (status & float_flag_overflow)
            env->fsr |= FSR_OFC;
        if (status & float_flag_underflow)
            env->fsr |= FSR_UFC;
        if (status & float_flag_divbyzero)
            env->fsr |= FSR_DZC;
        if (status & float_flag_inexact)
            env->fsr |= FSR_NXC;

        /* >> 23 aligns the TEM trap-enable bits with the CEXC bits */
        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            raise_exception(TT_FP_EXCP);
        } else {
            /* Accumulate exceptions: CEXC shifts up into AEXC */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}

/* Reset softfloat's exception flags before a new FP operation. */
void helper_clear_float_exceptions(void)
{
    set_float_exception_flags(0, &env->fp_status);
}
873
/* Absolute value: clear the sign bit. */
float32 helper_fabss(float32 src)
{
    return float32_abs(src);
}

#ifdef TARGET_SPARC64
void helper_fabsd(void)
{
    DT0 = float64_abs(DT1);
}

void helper_fabsq(void)
{
    QT0 = float128_abs(QT1);
}
#endif

/* Square root in the current rounding mode. */
float32 helper_fsqrts(float32 src)
{
    return float32_sqrt(src, &env->fp_status);
}

void helper_fsqrtd(void)
{
    DT0 = float64_sqrt(DT1, &env->fp_status);
}

void helper_fsqrtq(void)
{
    QT0 = float128_sqrt(QT1, &env->fp_status);
}
905
/* Generate an FP compare helper.
   name       - helper name
   size       - softfloat type prefix (float32/float64/float128)
   reg1/reg2  - operands (env registers for GEN_FCMP, explicit float32
                arguments for GEN_FCMPS)
   FS         - shift selecting the fcc field updated in the FSR
   E          - 1 for the fcmpe* ("signaling") variants, which also
                trap on any NaN operand when FSR.NVM is set.
   An unordered result with NVM set traps; otherwise it sets both fcc
   bits plus the NVA accrued-invalid bit. */
#define GEN_FCMP(name, size, reg1, reg2, FS, E)                         \
    void glue(helper_, name) (void)                                     \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(reg1) ||                      \
                  glue(size, _is_any_nan)(reg2)) &&                     \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }
#define GEN_FCMPS(name, size, FS, E)                                    \
    void glue(helper_, name)(float32 src1, float32 src2)                \
    {                                                                   \
        env->fsr &= FSR_FTT_NMASK;                                      \
        if (E && (glue(size, _is_any_nan)(src1) ||                      \
                  glue(size, _is_any_nan)(src2)) &&                     \
            (env->fsr & FSR_NVM)) {                                     \
            env->fsr |= FSR_NVC;                                        \
            env->fsr |= FSR_FTT_IEEE_EXCP;                              \
            raise_exception(TT_FP_EXCP);                                \
        }                                                               \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) {   \
        case float_relation_unordered:                                  \
            if ((env->fsr & FSR_NVM)) {                                 \
                env->fsr |= FSR_NVC;                                    \
                env->fsr |= FSR_FTT_IEEE_EXCP;                          \
                raise_exception(TT_FP_EXCP);                            \
            } else {                                                    \
                env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);             \
                env->fsr |= (FSR_FCC1 | FSR_FCC0) << FS;                \
                env->fsr |= FSR_NVA;                                    \
            }                                                           \
            break;                                                      \
        case float_relation_less:                                       \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC0 << FS;                                 \
            break;                                                      \
        case float_relation_greater:                                    \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            env->fsr |= FSR_FCC1 << FS;                                 \
            break;                                                      \
        default:                                                        \
            env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                 \
            break;                                                      \
        }                                                               \
    }

GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
987
/* CC_OP_FLAGS: condition codes are already up to date in env->psr. */
static uint32_t compute_all_flags(void)
{
    return env->psr & PSR_ICC;
}

static uint32_t compute_C_flags(void)
{
    return env->psr & PSR_CARRY;
}

/* N and Z flags from a 32-bit result. */
static inline uint32_t get_NZ_icc(int32_t dst)
{
    uint32_t ret = 0;

    if (dst == 0) {
        ret = PSR_ZERO;
    } else if (dst < 0) {
        ret = PSR_NEG;
    }
    return ret;
}
1009
#ifdef TARGET_SPARC64
/* xcc mirrors of the above: flags cached in env->xcc, results are
   64-bit. */
static uint32_t compute_all_flags_xcc(void)
{
    return env->xcc & PSR_ICC;
}

static uint32_t compute_C_flags_xcc(void)
{
    return env->xcc & PSR_CARRY;
}

/* N and Z flags from a 64-bit result. */
static inline uint32_t get_NZ_xcc(target_long dst)
{
    uint32_t ret = 0;

    if (!dst) {
        ret = PSR_ZERO;
    } else if (dst < 0) {
        ret = PSR_NEG;
    }
    return ret;
}
#endif
1033
/* For divide, CC_SRC2 is non-zero when the quotient overflowed. */
static inline uint32_t get_V_div_icc(target_ulong src2)
{
    uint32_t ret = 0;

    if (src2 != 0) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_div(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_V_div_icc(CC_SRC2);
    return ret;
}

/* Divide never produces a carry. */
static uint32_t compute_C_div(void)
{
    return 0;
}
1057
/* Carry out of a 32-bit add: unsigned wraparound makes dst < src1. */
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Carry for add-with-carry: full carry-out expression, valid whether
   or not a carry-in was added. */
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow: operands have the same sign and the result's sign
   differs. */
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
1089
#ifdef TARGET_SPARC64
/* 64-bit (xcc) versions of the add flag primitives; same formulas on
   bit 63. */
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_add_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add_xcc(void)
{
    return get_C_add_xcc(CC_DST, CC_SRC);
}
#endif
1138
/* N/Z/C/V after a 32-bit add of CC_SRC + CC_SRC2 = CC_DST. */
static uint32_t compute_all_add(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add(void)
{
    return get_C_add_icc(CC_DST, CC_SRC);
}
1153
#ifdef TARGET_SPARC64
/* Flags after add-with-carry (addx/addcc), 64-bit variant. */
static uint32_t compute_all_addx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx_xcc(void)
{
    uint32_t ret;

    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif

/* Flags after add-with-carry, 32-bit variant. */
static uint32_t compute_all_addx(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx(void)
{
    uint32_t ret;

    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
1191
/* Tagged arithmetic: overflow if either operand has its low two tag
   bits set. */
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if ((src1 | src2) & 0x3) {
        ret = PSR_OVF;
    }
    return ret;
}

/* Flags after taddcc: ordinary add flags plus tag overflow. */
static uint32_t compute_all_tadd(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* Flags after taddcctv: V is not computed here (the trap-on-overflow
   variant traps instead of setting V). */
static uint32_t compute_all_taddtv(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    return ret;
}
1221
/* Borrow out of a 32-bit subtract: src1 < src2 (unsigned). */
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Borrow for subtract-with-carry: full borrow-out expression, valid
   whether or not a borrow-in was subtracted. */
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow on subtract: operands differ in sign and the result
   has the opposite sign from src1. */
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
1253
1254
#ifdef TARGET_SPARC64
/* 64-bit (xcc) versions of the subtract flag primitives. */
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_sub_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub_xcc(void)
{
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
}
#endif
1303
/* N/Z/C/V after a 32-bit subtract of CC_SRC - CC_SRC2 = CC_DST. */
static uint32_t compute_all_sub(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub(void)
{
    return get_C_sub_icc(CC_SRC, CC_SRC2);
}
1318
#ifdef TARGET_SPARC64
/* Flags after subtract-with-borrow, 64-bit variant. */
static uint32_t compute_all_subx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx_xcc(void)
{
    uint32_t ret;

    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif

/* Flags after subtract-with-borrow, 32-bit variant. */
static uint32_t compute_all_subx(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx(void)
{
    uint32_t ret;

    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

/* Flags after tsubcc: ordinary subtract flags plus tag overflow. */
static uint32_t compute_all_tsub(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* Flags after tsubcctv: V is not computed here (trap-on-overflow). */
static uint32_t compute_all_tsubtv(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* Logical ops: only N and Z; C and V are always clear. */
static uint32_t compute_all_logic(void)
{
    return get_NZ_icc(CC_DST);
}

static uint32_t compute_C_logic(void)
{
    return 0;
}

#ifdef TARGET_SPARC64
static uint32_t compute_all_logic_xcc(void)
{
    return get_NZ_xcc(CC_DST);
}
#endif
1393
/* Pair of lazy condition-code evaluators for one CC_OP value: one
   computes the complete flag set, the other just the carry bit. */
typedef struct CCTable {
    uint32_t (*compute_all)(void); /* return all the flags */
    uint32_t (*compute_c)(void);  /* return the C flag */
} CCTable;
1398
/* 32-bit (icc) lazy flag evaluators, indexed by the current CC_OP. */
static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
};
1413
1414 #ifdef TARGET_SPARC64
/* 64-bit (xcc) lazy flag evaluators, indexed by the current CC_OP.
   NOTE(review): the tagged entries (TADD/TSUB/...) reuse the plain
   add/sub evaluators — presumably tag overflow only affects the
   32-bit V bit; confirm against the SPARC V9 spec.  CC_OP_DIV maps to
   the logic evaluator (N/Z only). */
static const CCTable xcc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
};
1429 #endif
1430
/* Materialize the lazily-evaluated condition codes into env->psr (and
   env->xcc on sparc64), then switch to CC_OP_FLAGS so subsequent
   reads take the precomputed-flags path. */
void helper_compute_psr(void)
{
    uint32_t new_psr;

    new_psr = icc_table[CC_OP].compute_all();
    env->psr = new_psr;
#ifdef TARGET_SPARC64
    new_psr = xcc_table[CC_OP].compute_all();
    env->xcc = new_psr;
#endif
    /* flags are now up to date; stop lazy evaluation */
    CC_OP = CC_OP_FLAGS;
}
1443
1444 uint32_t helper_compute_C_icc(void)
1445 {
1446 uint32_t ret;
1447
1448 ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
1449 return ret;
1450 }
1451
1452 static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1453 {
1454 dst[0] = src[0];
1455 dst[1] = src[1];
1456 dst[2] = src[2];
1457 dst[3] = src[3];
1458 dst[4] = src[4];
1459 dst[5] = src[5];
1460 dst[6] = src[6];
1461 dst[7] = src[7];
1462 }
1463
/* Switch the current register window to new_cwp, keeping the extra
   wrap-around copy of window 0 (stored just past the last regular
   window) coherent.  The save/restore of the wrap copy must bracket
   the cwp update, in this order. */
static void set_cwp(int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == env->nwindows - 1) {
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
    }
    env->cwp = new_cwp;

    /* put the wrap registers at their temporary location */
    if (new_cwp == env->nwindows - 1) {
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
    }
    /* 16 registers per window (locals + ins) */
    env->regwptr = env->regbase + (new_cwp * 16);
}
1478
1479 void cpu_set_cwp(CPUState *env1, int new_cwp)
1480 {
1481 CPUState *saved_env;
1482
1483 saved_env = env;
1484 env = env1;
1485 set_cwp(new_cwp);
1486 env = saved_env;
1487 }
1488
/* Assemble the architectural PSR value from the split-out fields.
   On sparc64 only the icc bits are returned; on sparc32 the full
   version/icc/EF/PIL/S/PS/ET/CWP word is built. */
static target_ulong get_psr(void)
{
    /* make sure env->psr holds up-to-date condition codes first */
    helper_compute_psr();

#if !defined (TARGET_SPARC64)
    return env->version | (env->psr & PSR_ICC) |
        (env->psref? PSR_EF : 0) |
        (env->psrpil << 8) |
        (env->psrs? PSR_S : 0) |
        (env->psrps? PSR_PS : 0) |
        (env->psret? PSR_ET : 0) | env->cwp;
#else
    return env->psr & PSR_ICC;
#endif
}
1504
1505 target_ulong cpu_get_psr(CPUState *env1)
1506 {
1507 CPUState *saved_env;
1508 target_ulong ret;
1509
1510 saved_env = env;
1511 env = env1;
1512 ret = get_psr();
1513 env = saved_env;
1514 return ret;
1515 }
1516
/* Scatter an architectural PSR value into the split-out fields and
   drop any lazy condition-code state. */
static void put_psr(target_ulong val)
{
    env->psr = val & PSR_ICC;
#if !defined (TARGET_SPARC64)
    env->psref = (val & PSR_EF)? 1 : 0;
    env->psrpil = (val & PSR_PIL) >> 8;
#endif
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
    /* PIL changed: re-evaluate pending interrupts.
       NOTE(review): this runs before psret (ET) is updated below, so
       the check still sees the old trap-enable bit — confirm this
       ordering is intentional. */
    cpu_check_irqs(env);
#endif
#if !defined (TARGET_SPARC64)
    env->psrs = (val & PSR_S)? 1 : 0;
    env->psrps = (val & PSR_PS)? 1 : 0;
    env->psret = (val & PSR_ET)? 1 : 0;
    set_cwp(val & PSR_CWP);
#endif
    /* condition codes now live in env->psr */
    env->cc_op = CC_OP_FLAGS;
}
1535
1536 void cpu_put_psr(CPUState *env1, target_ulong val)
1537 {
1538 CPUState *saved_env;
1539
1540 saved_env = env;
1541 env = env1;
1542 put_psr(val);
1543 env = saved_env;
1544 }
1545
1546 static int cwp_inc(int cwp)
1547 {
1548 if (unlikely(cwp >= env->nwindows)) {
1549 cwp -= env->nwindows;
1550 }
1551 return cwp;
1552 }
1553
1554 int cpu_cwp_inc(CPUState *env1, int cwp)
1555 {
1556 CPUState *saved_env;
1557 target_ulong ret;
1558
1559 saved_env = env;
1560 env = env1;
1561 ret = cwp_inc(cwp);
1562 env = saved_env;
1563 return ret;
1564 }
1565
1566 static int cwp_dec(int cwp)
1567 {
1568 if (unlikely(cwp < 0)) {
1569 cwp += env->nwindows;
1570 }
1571 return cwp;
1572 }
1573
1574 int cpu_cwp_dec(CPUState *env1, int cwp)
1575 {
1576 CPUState *saved_env;
1577 target_ulong ret;
1578
1579 saved_env = env;
1580 env = env1;
1581 ret = cwp_dec(cwp);
1582 env = saved_env;
1583 return ret;
1584 }
1585
1586 #ifdef TARGET_SPARC64
/* Instantiate the fcmp/fcmpe helpers for condition fields fcc1..fcc3
   (the fcc0 variants are generated elsewhere in this file).  The
   numeric argument is the FSR bit offset of the fcc field; the final
   flag selects the "E" (exception on unordered) compare semantics. */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
1610 #endif
1611 #undef GEN_FCMPS
1612
1613 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
1614 defined(DEBUG_MXCC)
/* DEBUG_MXCC helper: print the MXCC stream-data buffer and the eight
   MXCC control registers. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
1630 #endif
1631
1632 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
1633 && defined(DEBUG_ASI)
/* DEBUG_ASI helper: trace one ASI access, masking the value down to
   the access width so the printout matches what the guest sees. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size)
    {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
1657 #endif
1658
1659 #ifndef TARGET_SPARC64
1660 #ifndef CONFIG_USER_ONLY
1661
1662
1663 /* Leon3 cache control */
1664
/* Apply the Leon3 "freeze on interrupt" cache behaviour: if the
   freeze-on-interrupt bit for a cache is set and that cache is
   currently enabled, move it to the frozen state.  Called from the
   interrupt path (see leon3_irq_manager). */
static void leon3_cache_control_int(void)
{
    uint32_t state = 0;

    if (env->cache_control & CACHE_CTRL_IF) {
        /* Instruction cache state (bits 1:0 of the control register) */
        state = env->cache_control & CACHE_STATE_MASK;
        if (state == CACHE_ENABLED) {
            state = CACHE_FROZEN;
            DPRINTF_CACHE_CONTROL("Instruction cache: freeze\n");
        }

        env->cache_control &= ~CACHE_STATE_MASK;
        env->cache_control |= state;
    }

    if (env->cache_control & CACHE_CTRL_DF) {
        /* Data cache state (bits 3:2 of the control register) */
        state = (env->cache_control >> 2) & CACHE_STATE_MASK;
        if (state == CACHE_ENABLED) {
            state = CACHE_FROZEN;
            DPRINTF_CACHE_CONTROL("Data cache: freeze\n");
        }

        env->cache_control &= ~(CACHE_STATE_MASK << 2);
        env->cache_control |= (state << 2);
    }
}
1693
1694 static void leon3_cache_control_st(target_ulong addr, uint64_t val, int size)
1695 {
1696 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
1697 addr, val, size);
1698
1699 if (size != 4) {
1700 DPRINTF_CACHE_CONTROL("32bits only\n");
1701 return;
1702 }
1703
1704 switch (addr) {
1705 case 0x00: /* Cache control */
1706
1707 /* These values must always be read as zeros */
1708 val &= ~CACHE_CTRL_FD;
1709 val &= ~CACHE_CTRL_FI;
1710 val &= ~CACHE_CTRL_IB;
1711 val &= ~CACHE_CTRL_IP;
1712 val &= ~CACHE_CTRL_DP;
1713
1714 env->cache_control = val;
1715 break;
1716 case 0x04: /* Instruction cache configuration */
1717 case 0x08: /* Data cache configuration */
1718 /* Read Only */
1719 break;
1720 default:
1721 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
1722 break;
1723 };
1724 }
1725
/* Load handler for the Leon3 cache control registers (ASI 2).
   Only 32-bit accesses are supported; anything else reads as zero. */
static uint64_t leon3_cache_control_ld(target_ulong addr, int size)
{
    uint64_t ret = 0;

    if (size != 4) {
        DPRINTF_CACHE_CONTROL("32bits only\n");
        return 0;
    }

    /* NOTE(review): the ASI-2 dispatcher forwards offsets 0x00, 0x08
       and 0x0C here, but the configuration registers below are
       decoded at 0x04/0x08.  One of the two decoders looks off by one
       register — check against the GRLIB LEON3 manual (ICCFG at 0x08,
       DCCFG at 0x0C). */
    switch (addr) {
    case 0x00: /* Cache control */
        ret = env->cache_control;
        break;

        /* Configuration registers are read and only always keep those
           predefined values */

    case 0x04: /* Instruction cache configuration */
        ret = 0x10220000;
        break;
    case 0x08: /* Data cache configuration */
        ret = 0x18220000;
        break;
    default:
        DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
        break;
    };
    DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
                          addr, ret, size);
    return ret;
}
1757
/* Leon3 interrupt hook: acknowledge the interrupt on the controller,
   then apply the cache freeze-on-interrupt behaviour. */
void leon3_irq_manager(void *irq_manager, int intno)
{
    leon3_irq_ack(irq_manager, intno);
    leon3_cache_control_int();
}
1763
/* Load from an alternate address space (sparc32 system emulation).
 *
 * addr is the guest address, asi selects the address space, size is
 * the access width in bytes (1/2/4/8) and sign requests sign
 * extension of the result.  Unknown ASIs go to
 * do_unassigned_access(). */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers and Leon3 cache control */
        switch (addr) {
        /* NOTE(review): leon3_cache_control_ld decodes the cache
           configuration registers at offsets 0x04/0x08, not the
           0x08/0x0C forwarded here — verify which decoding matches
           the LEON3 hardware. */
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Date Cache config */
            if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
                ret = leon3_cache_control_ld(addr, size);
            }
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            mmulev = (addr >> 8) & 15;
            /* probe levels above 4 are undefined: return 0 */
            if (mmulev > 4)
                ret = 0;
            else
                ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 9: /* Supervisor code access */
        switch(size) {
        case 1:
            ret = ldub_code(addr);
            break;
        case 2:
            ret = lduw_code(addr);
            break;
        default:
        case 4:
            ret = ldl_code(addr);
            break;
        case 8:
            ret = ldq_code(addr);
            break;
        }
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            ret = ldub_user(addr);
            break;
        case 2:
            ret = lduw_user(addr);
            break;
        default:
        case 4:
            ret = ldl_user(addr);
            break;
        case 8:
            ret = ldq_user(addr);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            ret = ldub_kernel(addr);
            break;
        case 2:
            ret = lduw_kernel(addr);
            break;
        default:
        case 4:
            ret = ldl_kernel(addr);
            break;
        case 8:
            ret = ldq_kernel(addr);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
        /* caches are not modelled: reads return 0 */
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        default:
        case 4:
            ret = ldl_phys(addr);
            break;
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        /* low nibble of the ASI supplies physical address bits 35:32 */
        switch(size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                /* status is clear-on-read */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        ret = env->mmubpctrv;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        ret = env->mmubpctrc;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        ret = env->mmubpctrs;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        ret = env->mmubpaction;
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi, size);
        ret = 0;
        break;
    }
    /* sign-extend the loaded value if requested */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2015
2016 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
2017 {
2018 helper_check_align(addr, size - 1);
2019 switch(asi) {
2020 case 2: /* SuperSparc MXCC registers and Leon3 cache control */
2021 switch (addr) {
2022 case 0x00: /* Leon3 Cache Control */
2023 case 0x08: /* Leon3 Instruction Cache config */
2024 case 0x0C: /* Leon3 Date Cache config */
2025 if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
2026 leon3_cache_control_st(addr, val, size);
2027 }
2028 break;
2029
2030 case 0x01c00000: /* MXCC stream data register 0 */
2031 if (size == 8)
2032 env->mxccdata[0] = val;
2033 else
2034 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2035 size);
2036 break;
2037 case 0x01c00008: /* MXCC stream data register 1 */
2038 if (size == 8)
2039 env->mxccdata[1] = val;
2040 else
2041 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2042 size);
2043 break;
2044 case 0x01c00010: /* MXCC stream data register 2 */
2045 if (size == 8)
2046 env->mxccdata[2] = val;
2047 else
2048 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2049 size);
2050 break;
2051 case 0x01c00018: /* MXCC stream data register 3 */
2052 if (size == 8)
2053 env->mxccdata[3] = val;
2054 else
2055 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2056 size);
2057 break;
2058 case 0x01c00100: /* MXCC stream source */
2059 if (size == 8)
2060 env->mxccregs[0] = val;
2061 else
2062 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2063 size);
2064 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2065 0);
2066 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2067 8);
2068 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2069 16);
2070 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
2071 24);
2072 break;
2073 case 0x01c00200: /* MXCC stream destination */
2074 if (size == 8)
2075 env->mxccregs[1] = val;
2076 else
2077 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2078 size);
2079 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
2080 env->mxccdata[0]);
2081 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
2082 env->mxccdata[1]);
2083 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
2084 env->mxccdata[2]);
2085 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
2086 env->mxccdata[3]);
2087 break;
2088 case 0x01c00a00: /* MXCC control register */
2089 if (size == 8)
2090 env->mxccregs[3] = val;
2091 else
2092 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2093 size);
2094 break;
2095 case 0x01c00a04: /* MXCC control register */
2096 if (size == 4)
2097 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
2098 | val;
2099 else
2100 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2101 size);
2102 break;
2103 case 0x01c00e00: /* MXCC error register */
2104 // writing a 1 bit clears the error
2105 if (size == 8)
2106 env->mxccregs[6] &= ~val;
2107 else
2108 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2109 size);
2110 break;
2111 case 0x01c00f00: /* MBus port address register */
2112 if (size == 8)
2113 env->mxccregs[7] = val;
2114 else
2115 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
2116 size);
2117 break;
2118 default:
2119 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
2120 size);
2121 break;
2122 }
2123 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
2124 asi, size, addr, val);
2125 #ifdef DEBUG_MXCC
2126 dump_mxcc(env);
2127 #endif
2128 break;
2129 case 3: /* MMU flush */
2130 {
2131 int mmulev;
2132
2133 mmulev = (addr >> 8) & 15;
2134 DPRINTF_MMU("mmu flush level %d\n", mmulev);
2135 switch (mmulev) {
2136 case 0: // flush page
2137 tlb_flush_page(env, addr & 0xfffff000);
2138 break;
2139 case 1: // flush segment (256k)
2140 case 2: // flush region (16M)
2141 case 3: // flush context (4G)
2142 case 4: // flush entire
2143 tlb_flush(env, 1);
2144 break;
2145 default:
2146 break;
2147 }
2148 #ifdef DEBUG_MMU
2149 dump_mmu(stdout, fprintf, env);
2150 #endif
2151 }
2152 break;
2153 case 4: /* write MMU regs */
2154 {
2155 int reg = (addr >> 8) & 0x1f;
2156 uint32_t oldreg;
2157
2158 oldreg = env->mmuregs[reg];
2159 switch(reg) {
2160 case 0: // Control Register
2161 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
2162 (val & 0x00ffffff);
2163 // Mappings generated during no-fault mode or MMU
2164 // disabled mode are invalid in normal mode
2165 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
2166 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
2167 tlb_flush(env, 1);
2168 break;
2169 case 1: // Context Table Pointer Register
2170 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
2171 break;
2172 case 2: // Context Register
2173 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
2174 if (oldreg != env->mmuregs[reg]) {
2175 /* we flush when the MMU context changes because
2176 QEMU has no MMU context support */
2177 tlb_flush(env, 1);
2178 }
2179 break;
2180 case 3: // Synchronous Fault Status Register with Clear
2181 case 4: // Synchronous Fault Address Register
2182 break;
2183 case 0x10: // TLB Replacement Control Register
2184 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
2185 break;
2186 case 0x13: // Synchronous Fault Status Register with Read and Clear
2187 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
2188 break;
2189 case 0x14: // Synchronous Fault Address Register
2190 env->mmuregs[4] = val;
2191 break;
2192 default:
2193 env->mmuregs[reg] = val;
2194 break;
2195 }
2196 if (oldreg != env->mmuregs[reg]) {
2197 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
2198 reg, oldreg, env->mmuregs[reg]);
2199 }
2200 #ifdef DEBUG_MMU
2201 dump_mmu(stdout, fprintf, env);
2202 #endif
2203 }
2204 break;
2205 case 5: // Turbosparc ITLB Diagnostic
2206 case 6: // Turbosparc DTLB Diagnostic
2207 case 7: // Turbosparc IOTLB Diagnostic
2208 break;
2209 case 0xa: /* User data access */
2210 switch(size) {
2211 case 1:
2212 stb_user(addr, val);
2213 break;
2214 case 2:
2215 stw_user(addr, val);
2216 break;
2217 default:
2218 case 4:
2219 stl_user(addr, val);
2220 break;
2221 case 8:
2222 stq_user(addr, val);
2223 break;
2224 }
2225 break;
2226 case 0xb: /* Supervisor data access */
2227 switch(size) {
2228 case 1:
2229 stb_kernel(addr, val);
2230 break;
2231 case 2:
2232 stw_kernel(addr, val);
2233 break;
2234 default:
2235 case 4:
2236 stl_kernel(addr, val);
2237 break;
2238 case 8:
2239 stq_kernel(addr, val);
2240 break;
2241 }
2242 break;
2243 case 0xc: /* I-cache tag */
2244 case 0xd: /* I-cache data */
2245 case 0xe: /* D-cache tag */
2246 case 0xf: /* D-cache data */
2247 case 0x10: /* I/D-cache flush page */
2248 case 0x11: /* I/D-cache flush segment */
2249 case 0x12: /* I/D-cache flush region */
2250 case 0x13: /* I/D-cache flush context */
2251 case 0x14: /* I/D-cache flush user */
2252 break;
2253 case 0x17: /* Block copy, sta access */
2254 {
2255 // val = src
2256 // addr = dst
2257 // copy 32 bytes
2258 unsigned int i;
2259 uint32_t src = val & ~3, dst = addr & ~3, temp;
2260
2261 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
2262 temp = ldl_kernel(src);
2263 stl_kernel(dst, temp);
2264 }
2265 }
2266 break;
2267 case 0x1f: /* Block fill, stda access */
2268 {
2269 // addr = dst
2270 // fill 32 bytes with val
2271 unsigned int i;
2272 uint32_t dst = addr & 7;
2273
2274 for (i = 0; i < 32; i += 8, dst += 8)
2275 stq_kernel(dst, val);
2276 }
2277 break;
2278 case 0x20: /* MMU passthrough */
2279 {
2280 switch(size) {
2281 case 1:
2282 stb_phys(addr, val);
2283 break;
2284 case 2:
2285 stw_phys(addr, val);
2286 break;
2287 case 4:
2288 default:
2289 stl_phys(addr, val);
2290 break;
2291 case 8:
2292 stq_phys(addr, val);
2293 break;
2294 }
2295 }
2296 break;
2297 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
2298 {
2299 switch(size) {
2300 case 1:
2301 stb_phys((target_phys_addr_t)addr
2302 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2303 break;
2304 case 2:
2305 stw_phys((target_phys_addr_t)addr
2306 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2307 break;
2308 case 4:
2309 default:
2310 stl_phys((target_phys_addr_t)addr
2311 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2312 break;
2313 case 8:
2314 stq_phys((target_phys_addr_t)addr
2315 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2316 break;
2317 }
2318 }
2319 break;
2320 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
2321 case 0x31: // store buffer data, Ross RT620 I-cache flush or
2322 // Turbosparc snoop RAM
2323 case 0x32: // store buffer control or Turbosparc page table
2324 // descriptor diagnostic
2325 case 0x36: /* I-cache flash clear */
2326 case 0x37: /* D-cache flash clear */
2327 break;
2328 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
2329 {
2330 int reg = (addr >> 8) & 3;
2331
2332 switch(reg) {
2333 case 0: /* Breakpoint Value (Addr) */
2334 env->mmubpregs[reg] = (val & 0xfffffffffULL);
2335 break;
2336 case 1: /* Breakpoint Mask */
2337 env->mmubpregs[reg] = (val & 0xfffffffffULL);
2338 break;
2339 case 2: /* Breakpoint Control */
2340 env->mmubpregs[reg] = (val & 0x7fULL);
2341 break;
2342 case 3: /* Breakpoint Status */
2343 env->mmubpregs[reg] = (val & 0xfULL);
2344 break;
2345 }
2346 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
2347 env->mmuregs[reg]);
2348 }
2349 break;
2350 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
2351 env->mmubpctrv = val & 0xffffffff;
2352 break;
2353 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
2354 env->mmubpctrc = val & 0x3;
2355 break;
2356 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
2357 env->mmubpctrs = val & 0x3;
2358 break;
2359 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
2360 env->mmubpaction = val & 0x1fff;
2361 break;
2362 case 8: /* User code access, XXX */
2363 case 9: /* Supervisor code access, XXX */
2364 default:
2365 do_unassigned_access(addr, 1, 0, asi, size);
2366 break;
2367 }
2368 #ifdef DEBUG_ASI
2369 dump_asi("write", addr, asi, size, val);
2370 #endif
2371 }
2372
2373 #endif /* CONFIG_USER_ONLY */
2374 #else /* TARGET_SPARC64 */
2375
2376 #ifdef CONFIG_USER_ONLY
/* Load from an alternate address space (sparc64 user emulation).
   Only the unprivileged ASIs (>= 0x80) are usable; anything lower
   raises a privileged-action trap. */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        /* no-fault loads read as 0 instead of faulting on unmapped
           pages */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                ret = ldub_raw(addr);
                break;
            case 2:
                ret = lduw_raw(addr);
                break;
            case 4:
                ret = ldl_raw(addr);
                break;
            default:
            case 8:
                ret = ldq_raw(addr);
                break;
            }
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
        /* falls through to the (empty) default case */
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2481
/* Store to an alternate address space (sparc64 user emulation).
   Only the unprivileged ASIs (>= 0x80) are usable; no-fault ASIs are
   read-only, so stores to them are unassigned accesses. */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
        /* falls through to the (empty) default case */
    default:
        break;
    }

    switch(asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                stb_raw(addr, val);
                break;
            case 2:
                stw_raw(addr, val);
                break;
            case 4:
                stl_raw(addr, val);
                break;
            case 8:
            default:
                stq_raw(addr, val);
                break;
            }
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;

    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
2549
2550 #else /* CONFIG_USER_ONLY */
2551
/* Load from an alternate address space (LDxA), 64-bit SPARC, system mode.
 *
 * addr: virtual address (physical for the bypass ASIs 0x14/0x15/0x1c/0x1d)
 * asi:  address space identifier; only the low 8 bits are used
 * size: access size in bytes (1, 2, 4 or 8)
 * sign: non-zero to sign-extend sub-64-bit results
 *
 * Returns the loaded value, byte-swapped for little-endian ASIs and
 * sign-extended on request.  May raise privileged-action, illegal-
 * instruction, or MMU fault exceptions (which do not return).
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    /* ASIs < 0x80 are privileged; 0x30..0x7f additionally require
       hypervisor privilege when the CPU implements one. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    addr = asi_address_mask(env, asi, addr);

    /* process nonfaulting loads first */
    if ((asi & 0xf6) == 0x82) {   /* matches ASIs 0x82, 0x83, 0x8a, 0x8b */
        int mmu_idx;

        /* secondary space access has lowest asi bit equal to 1 */
        if (env->pstate & PS_PRIV) {
            mmu_idx = (asi & 1) ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX;
        } else {
            mmu_idx = (asi & 1) ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX;
        }

        /* Probe the translation without faulting; a nonfaulting load from
           an unmapped page still raises the exception recorded below. */
        if (cpu_get_phys_page_nofault(env, addr, mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            /* env->exception_index is set in get_physical_address_data(). */
            raise_exception(env->exception_index);
        }

        /* convert nonfaulting load ASIs to normal load ASIs */
        asi &= ~0x02;
    }

    switch (asi) {
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        /* Select the MMU context: hypervisor, kernel, or user; the
           "as if user" ASIs (< 0x80) always use the user context. */
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel_secondary(addr);
                        break;
                    case 2:
                        ret = lduw_kernel_secondary(addr);
                        break;
                    case 4:
                        ret = ldl_kernel_secondary(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel_secondary(addr);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel(addr);
                        break;
                    case 2:
                        ret = lduw_kernel(addr);
                        break;
                    case 4:
                        ret = ldl_kernel(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel(addr);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    ret = ldub_user_secondary(addr);
                    break;
                case 2:
                    ret = lduw_user_secondary(addr);
                    break;
                case 4:
                    ret = ldl_user_secondary(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user_secondary(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_user(addr);
                    break;
                case 2:
                    ret = lduw_user(addr);
                    break;
                case 4:
                    ret = ldl_user(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user(addr);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        /* Bypass ASIs access physical memory directly, no translation. */
        {
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        // Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
        {
            switch(size) {
            case 1:
                ret = ldub_nucleus(addr);
                break;
            case 2:
                ret = lduw_nucleus(addr);
                break;
            case 4:
                ret = ldl_nucleus(addr);
                break;
            default:
            case 8:
                ret = ldq_nucleus(addr);
                break;
            }
            break;
        }
    case 0x4a: // UPA config
        // XXX
        break;
    case 0x45: // LSU
        ret = env->lsu;
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // I-TSB Tag Target register
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: // I-MMU 64k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: // I-MMU data access
        {
            /* TLB entry index is encoded in address bits [8:3]. */
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // D-TSB Tag Target register
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: // D-MMU 8k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: // D-MMU 64k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    /* The cache-diagnostic ASIs below are accepted but not modelled;
       they read as zero. */
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2909
2910 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
2911 {
2912 #ifdef DEBUG_ASI
2913 dump_asi("write", addr, asi, size, val);
2914 #endif
2915
2916 asi &= 0xff;
2917
2918 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2919 || (cpu_has_hypervisor(env)
2920 && asi >= 0x30 && asi < 0x80
2921 && !(env->hpstate & HS_PRIV)))
2922 raise_exception(TT_PRIV_ACT);
2923
2924 helper_check_align(addr, size - 1);
2925 addr = asi_address_mask(env, asi, addr);
2926
2927 /* Convert to little endian */
2928 switch (asi) {
2929 case 0x0c: // Nucleus Little Endian (LE)
2930 case 0x18: // As if user primary LE
2931 case 0x19: // As if user secondary LE
2932 case 0x1c: // Bypass LE
2933 case 0x1d: // Bypass, non-cacheable LE
2934 case 0x88: // Primary LE
2935 case 0x89: // Secondary LE
2936 switch(size) {
2937 case 2:
2938 val = bswap16(val);
2939 break;
2940 case 4:
2941 val = bswap32(val);
2942 break;
2943 case 8:
2944 val = bswap64(val);
2945 break;
2946 default:
2947 break;
2948 }
2949 default:
2950 break;
2951 }
2952
2953 switch(asi) {
2954 case 0x10: // As if user primary
2955 case 0x11: // As if user secondary
2956 case 0x18: // As if user primary LE
2957 case 0x19: // As if user secondary LE
2958 case 0x80: // Primary
2959 case 0x81: // Secondary
2960 case 0x88: // Primary LE
2961 case 0x89: // Secondary LE
2962 case 0xe2: // UA2007 Primary block init
2963 case 0xe3: // UA2007 Secondary block init
2964 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
2965 if (cpu_hypervisor_mode(env)) {
2966 switch(size) {
2967 case 1:
2968 stb_hypv(addr, val);
2969 break;
2970 case 2:
2971 stw_hypv(addr, val);
2972 break;
2973 case 4:
2974 stl_hypv(addr, val);
2975 break;
2976 case 8:
2977 default:
2978 stq_hypv(addr, val);
2979 break;
2980 }
2981 } else {
2982 /* secondary space access has lowest asi bit equal to 1 */
2983 if (asi & 1) {
2984 switch(size) {
2985 case 1:
2986 stb_kernel_secondary(addr, val);
2987 break;
2988 case 2:
2989 stw_kernel_secondary(addr, val);
2990 break;
2991 case 4:
2992 stl_kernel_secondary(addr, val);
2993 break;
2994 case 8:
2995 default:
2996 stq_kernel_secondary(addr, val);
2997 break;
2998 }
2999 } else {
3000 switch(size) {
3001 case 1:
3002 stb_kernel(addr, val);
3003 break;
3004 case 2:
3005 stw_kernel(addr, val);
3006 break;
3007 case 4:
3008 stl_kernel(addr, val);
3009 break;
3010 case 8:
3011 default:
3012 stq_kernel(addr, val);
3013 break;
3014 }
3015 }
3016 }
3017 } else {
3018 /* secondary space access has lowest asi bit equal to 1 */
3019 if (asi & 1) {
3020 switch(size) {
3021 case 1:
3022 stb_user_secondary(addr, val);
3023 break;
3024 case 2:
3025 stw_user_secondary(addr, val);
3026 break;
3027 case 4:
3028 stl_user_secondary(addr, val);
3029 break;
3030 case 8:
3031 default:
3032 stq_user_secondary(addr, val);
3033 break;
3034 }
3035 } else {
3036 switch(size) {
3037 case 1:
3038 stb_user(addr, val);
3039 break;
3040 case 2:
3041 stw_user(addr, val);
3042 break;
3043 case 4:
3044 stl_user(addr, val);
3045 break;
3046 case 8:
3047 default:
3048 stq_user(addr, val);
3049 break;
3050 }
3051 }
3052 }
3053 break;
3054 case 0x14: // Bypass
3055 case 0x15: // Bypass, non-cacheable
3056 case 0x1c: // Bypass LE
3057 case 0x1d: // Bypass, non-cacheable LE
3058 {
3059 switch(size) {
3060 case 1:
3061 stb_phys(addr, val);
3062 break;
3063 case 2:
3064 stw_phys(addr, val);
3065 break;
3066 case 4:
3067 stl_phys(addr, val);
3068 break;
3069 case 8:
3070 default:
3071 stq_phys(addr, val);
3072 break;
3073 }
3074 }
3075 return;
3076 case 0x24: // Nucleus quad LDD 128 bit atomic
3077 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
3078 // Only ldda allowed
3079 raise_exception(TT_ILL_INSN);
3080 return;
3081 case 0x04: // Nucleus
3082 case 0x0c: // Nucleus Little Endian (LE)
3083 {
3084 switch(size) {
3085 case 1:
3086 stb_nucleus(addr, val);
3087 break;
3088 case 2:
3089 stw_nucleus(addr, val);
3090 break;
3091 case 4:
3092 stl_nucleus(addr, val);
3093 break;
3094 default:
3095 case 8:
3096 stq_nucleus(addr, val);
3097 break;
3098 }
3099 break;
3100 }
3101
3102 case 0x4a: // UPA config
3103 // XXX
3104 return;
3105 case 0x45: // LSU
3106 {
3107 uint64_t oldreg;
3108
3109 oldreg = env->lsu;
3110 env->lsu = val & (DMMU_E | IMMU_E);
3111 // Mappings generated during D/I MMU disabled mode are
3112 // invalid in normal mode
3113 if (oldreg != env->lsu) {
3114 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
3115 oldreg, env->lsu);
3116 #ifdef DEBUG_MMU
3117 dump_mmu(stdout, fprintf, env1);
3118 #endif
3119 tlb_flush(env, 1);
3120 }
3121 return;
3122 }
3123 case 0x50: // I-MMU regs
3124 {
3125 int reg = (addr >> 3) & 0xf;
3126 uint64_t oldreg;
3127
3128 oldreg = env->immuregs[reg];
3129 switch(reg) {
3130 case 0: // RO
3131 return;
3132 case 1: // Not in I-MMU
3133 case 2:
3134 return;
3135 case 3: // SFSR
3136 if ((val & 1) == 0)
3137 val = 0; // Clear SFSR
3138 env->immu.sfsr = val;
3139 break;
3140 case 4: // RO
3141 return;
3142 case 5: // TSB access
3143 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
3144 PRIx64 "\n", env->immu.tsb, val);
3145 env->immu.tsb = val;
3146 break;
3147 case 6: // Tag access
3148 env->immu.tag_access = val;
3149 break;
3150 case 7:
3151 case 8:
3152 return;
3153 default:
3154 break;
3155 }
3156
3157 if (oldreg != env->immuregs[reg]) {
3158 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3159 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
3160 }
3161 #ifdef DEBUG_MMU
3162 dump_mmu(stdout, fprintf, env);
3163 #endif
3164 return;
3165 }
3166 case 0x54: // I-MMU data in
3167 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
3168 return;
3169 case 0x55: // I-MMU data access
3170 {
3171 // TODO: auto demap
3172
3173 unsigned int i = (addr >> 3) & 0x3f;
3174
3175 replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
3176
3177 #ifdef DEBUG_MMU
3178 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
3179 dump_mmu(stdout, fprintf, env);
3180 #endif
3181 return;
3182 }
3183 case 0x57: // I-MMU demap
3184 demap_tlb(env->itlb, addr, "immu", env);
3185 return;
3186 case 0x58: // D-MMU regs
3187 {
3188 int reg = (addr >> 3) & 0xf;
3189 uint64_t oldreg;
3190
3191 oldreg = env->dmmuregs[reg];
3192 switch(reg) {
3193 case 0: // RO
3194 case 4:
3195 return;
3196 case 3: // SFSR
3197 if ((val & 1) == 0) {
3198 val = 0; // Clear SFSR, Fault address
3199 env->dmmu.sfar = 0;
3200 }
3201 env->dmmu.sfsr = val;
3202 break;
3203 case 1: // Primary context
3204 env->dmmu.mmu_primary_context = val;
3205 /* can be optimized to only flush MMU_USER_IDX
3206 and MMU_KERNEL_IDX entries */
3207 tlb_flush(env, 1);
3208 break;
3209 case 2: // Secondary context
3210 env->dmmu.mmu_secondary_context = val;
3211 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
3212 and MMU_KERNEL_SECONDARY_IDX entries */
3213 tlb_flush(env, 1);
3214 break;
3215 case 5: // TSB access
3216 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
3217 PRIx64 "\n", env->dmmu.tsb, val);
3218 env->dmmu.tsb = val;
3219 break;
3220 case 6: // Tag access
3221 env->dmmu.tag_access = val;
3222 break;
3223 case 7: // Virtual Watchpoint
3224 case 8: // Physical Watchpoint
3225 default:
3226 env->dmmuregs[reg] = val;
3227 break;
3228 }
3229
3230 if (oldreg != env->dmmuregs[reg]) {
3231 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
3232 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
3233 }
3234 #ifdef DEBUG_MMU
3235 dump_mmu(stdout, fprintf, env);
3236 #endif
3237 return;
3238 }
3239 case 0x5c: // D-MMU data in
3240 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
3241 return;
3242 case 0x5d: // D-MMU data access
3243 {
3244 unsigned int i = (addr >> 3) & 0x3f;
3245
3246 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
3247
3248 #ifdef DEBUG_MMU
3249 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
3250 dump_mmu(stdout, fprintf, env);
3251 #endif
3252 return;
3253 }
3254 case 0x5f: // D-MMU demap
3255 demap_tlb(env->dtlb, addr, "dmmu", env);
3256 return;
3257 case 0x49: // Interrupt data receive
3258 // XXX
3259 return;
3260 case 0x46: // D-cache data
3261 case 0x47: // D-cache tag access
3262 case 0x4b: // E-cache error enable
3263 case 0x4c: // E-cache asynchronous fault status
3264 case 0x4d: // E-cache asynchronous fault address
3265 case 0x4e: // E-cache tag data
3266 case 0x66: // I-cache instruction access
3267 case 0x67: // I-cache tag access
3268 case 0x6e: // I-cache predecode
3269 case 0x6f: // I-cache LRU etc.
3270 case 0x76: // E-cache tag
3271 case 0x7e: // E-cache tag
3272 return;
3273 case 0x51: // I-MMU 8k TSB pointer, RO
3274 case 0x52: // I-MMU 64k TSB pointer, RO
3275 case 0x56: // I-MMU tag read, RO
3276 case 0x59: // D-MMU 8k TSB pointer, RO
3277 case 0x5a: // D-MMU 64k TSB pointer, RO
3278 case 0x5b: // D-MMU data pointer, RO
3279 case 0x5e: // D-MMU tag read, RO
3280 case 0x48: // Interrupt dispatch, RO
3281 case 0x7f: // Incoming interrupt vector, RO
3282 case 0x82: // Primary no-fault, RO
3283 case 0x83: // Secondary no-fault, RO
3284 case 0x8a: // Primary no-fault LE, RO
3285 case 0x8b: // Secondary no-fault LE, RO
3286 default:
3287 do_unassigned_access(addr, 1, 0, 1, size);
3288 return;
3289 }
3290 }
3291 #endif /* CONFIG_USER_ONLY */
3292
/* LDDA: load doubleword pair into register pair rd/rd+1.
 *
 * The quad ASIs (0x24/0x2c) perform a 128-bit atomic nucleus load; all
 * other ASIs fall back to two 32-bit helper_ld_asi accesses (the V8-style
 * LDDA semantics).  rd == 0 discards the first word into %g0 and only
 * writes %g1; rd < 8 writes the global registers, otherwise the current
 * window via regwptr.
 */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    /* Same privilege checks as helper_ld_asi. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        helper_check_align(addr, 0xf);      /* must be 16-byte aligned */
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        helper_check_align(addr, 0x3);      /* two word accesses */
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
3343
/* Load into the floating-point register file through an ASI.
 *
 * size is 4 (single), 8 (double) or 16 (quad); rd is the index into
 * env->fpr, which is accessed 32 bits at a time via pointer casts.  The
 * block-load ASIs move a full 64-byte block into 8 consecutive double
 * registers (rd must be 8-aligned) and are translated to the matching
 * plain ASI before delegating to helper_ld_asi.
 */
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xf0: /* UA2007/JPS1 Block load primary */
    case 0xf1: /* UA2007/JPS1 Block load secondary */
    case 0xf8: /* UA2007/JPS1 Block load primary LE */
    case 0xf9: /* UA2007/JPS1 Block load secondary LE */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);     /* 64-byte block alignment */
        /* asi & 0x8f maps 0xf0/f1/f8/f9 to the plain primary/secondary
           (LE) ASIs 0x80/81/88/89. */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block load primary, user privilege */
    case 0x17: /* UA2007 Block load secondary, user privilege */
    case 0x1e: /* UA2007 Block load primary LE, user privilege */
    case 0x1f: /* UA2007 Block load secondary LE, user privilege */
    case 0x70: /* JPS1 Block load primary, user privilege */
    case 0x71: /* JPS1 Block load secondary, user privilege */
    case 0x78: /* JPS1 Block load primary LE, user privilege */
    case 0x79: /* JPS1 Block load secondary LE, user privilege */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps these to the "as if user" ASIs 0x10/11/18/19. */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x19, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = helper_ld_asi(addr, asi, size, 0);
        break;
    case 8:
        /* 64-bit load split across two 32-bit register halves. */
        u.ll = helper_ld_asi(addr, asi, size, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    case 16:
        /* quad = two 64-bit loads */
        u.ll = helper_ld_asi(addr, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        u.ll = helper_ld_asi(addr + 8, asi, 8, 0);
        *((uint32_t *)&env->fpr[rd++]) = u.l.upper;
        *((uint32_t *)&env->fpr[rd++]) = u.l.lower;
        break;
    }
}
3413
/* Store from the floating-point register file through an ASI.
 *
 * Mirror of helper_ldf_asi: size is 4, 8 or 16; block-store ASIs write a
 * 64-byte block from 8 consecutive double registers (rd 8-aligned) and
 * are translated to the matching plain ASI before delegating to
 * helper_st_asi.
 */
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;
    CPU_DoubleU u;

    helper_check_align(addr, 3);
    addr = asi_address_mask(env, asi, addr);

    switch (asi) {
    case 0xe0: /* UA2007/JPS1 Block commit store primary (cache flush) */
    case 0xe1: /* UA2007/JPS1 Block commit store secondary (cache flush) */
    case 0xf0: /* UA2007/JPS1 Block store primary */
    case 0xf1: /* UA2007/JPS1 Block store secondary */
    case 0xf8: /* UA2007/JPS1 Block store primary LE */
    case 0xf9: /* UA2007/JPS1 Block store secondary LE */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);     /* 64-byte block alignment */
        /* asi & 0x8f maps these to the plain primary/secondary (LE) ASIs. */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    case 0x16: /* UA2007 Block store primary, user privilege */
    case 0x17: /* UA2007 Block store secondary, user privilege */
    case 0x1e: /* UA2007 Block store primary LE, user privilege */
    case 0x1f: /* UA2007 Block store secondary LE, user privilege */
    case 0x70: /* JPS1 Block store primary, user privilege */
    case 0x71: /* JPS1 Block store secondary, user privilege */
    case 0x78: /* JPS1 Block store primary LE, user privilege */
    case 0x79: /* JPS1 Block store secondary LE, user privilege */
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x19 maps these to the "as if user" ASIs 0x10/11/18/19. */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x19, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        helper_st_asi(addr, *(uint32_t *)&env->fpr[rd], asi, size);
        break;
    case 8:
        /* 64-bit store assembled from two 32-bit register halves. */
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, size);
        break;
    case 16:
        /* quad = two 64-bit stores */
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr, u.ll, asi, 8);
        u.l.upper = *(uint32_t *)&env->fpr[rd++];
        u.l.lower = *(uint32_t *)&env->fpr[rd++];
        helper_st_asi(addr + 8, u.ll, asi, 8);
        break;
    }
}
3486
3487 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
3488 target_ulong val2, uint32_t asi)
3489 {
3490 target_ulong ret;
3491
3492 val2 &= 0xffffffffUL;
3493 ret = helper_ld_asi(addr, asi, 4, 0);
3494 ret &= 0xffffffffUL;
3495 if (val2 == ret)
3496 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
3497 return ret;
3498 }
3499
3500 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
3501 target_ulong val2, uint32_t asi)
3502 {
3503 target_ulong ret;
3504
3505 ret = helper_ld_asi(addr, asi, 8, 0);
3506 if (val2 == ret)
3507 helper_st_asi(addr, val1, asi, 8);
3508 return ret;
3509 }
3510 #endif /* TARGET_SPARC64 */
3511
3512 #ifndef TARGET_SPARC64
3513 void helper_rett(void)
3514 {
3515 unsigned int cwp;
3516
3517 if (env->psret == 1)
3518 raise_exception(TT_ILL_INSN);
3519
3520 env->psret = 1;
3521 cwp = cwp_inc(env->cwp + 1) ;
3522 if (env->wim & (1 << cwp)) {
3523 raise_exception(TT_WIN_UNF);
3524 }
3525 set_cwp(cwp);
3526 env->psrs = env->psrps;
3527 }
3528 #endif
3529
3530 static target_ulong helper_udiv_common(target_ulong a, target_ulong b, int cc)
3531 {
3532 int overflow = 0;
3533 uint64_t x0;
3534 uint32_t x1;
3535
3536 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3537 x1 = (b & 0xffffffff);
3538
3539 if (x1 == 0) {
3540 raise_exception(TT_DIV_ZERO);
3541 }
3542
3543 x0 = x0 / x1;
3544 if (x0 > 0xffffffff) {
3545 x0 = 0xffffffff;
3546 overflow = 1;
3547 }
3548
3549 if (cc) {
3550 env->cc_dst = x0;
3551 env->cc_src2 = overflow;
3552 env->cc_op = CC_OP_DIV;
3553 }
3554 return x0;
3555 }
3556
/* UDIV: unsigned divide without updating the condition codes. */
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 0);
}
3561
/* UDIVcc: unsigned divide, updating the integer condition codes. */
target_ulong helper_udiv_cc(target_ulong a, target_ulong b)
{
    return helper_udiv_common(a, b, 1);
}
3566
3567 static target_ulong helper_sdiv_common(target_ulong a, target_ulong b, int cc)
3568 {
3569 int overflow = 0;
3570 int64_t x0;
3571 int32_t x1;
3572
3573 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3574 x1 = (b & 0xffffffff);
3575
3576 if (x1 == 0) {
3577 raise_exception(TT_DIV_ZERO);
3578 }
3579
3580 x0 = x0 / x1;
3581 if ((int32_t) x0 != x0) {
3582 x0 = x0 < 0 ? 0x80000000: 0x7fffffff;
3583 overflow = 1;
3584 }
3585
3586 if (cc) {
3587 env->cc_dst = x0;
3588 env->cc_src2 = overflow;
3589 env->cc_op = CC_OP_DIV;
3590 }
3591 return x0;
3592 }
3593
/* SDIV: signed divide without updating the condition codes. */
target_ulong helper_sdiv(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 0);
}
3598
/* SDIVcc: signed divide, updating the integer condition codes. */
target_ulong helper_sdiv_cc(target_ulong a, target_ulong b)
{
    return helper_sdiv_common(a, b, 1);
}
3603
/* Store the 64-bit FP temporary DT0 to memory, dispatching on the MMU
   index in system mode; user mode stores through the flat address space
   (with the 32-bit address mask applied when PSTATE.AM is set). */
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);    /* doubleword alignment required */
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        stfq_user(addr, DT0);
        break;
    case MMU_KERNEL_IDX:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        /* unexpected index: store is silently dropped */
        DPRINTF_MMU("helper_stdf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    stfq_raw(address_mask(env, addr), DT0);
#endif
}
3628
/* Load a 64-bit value from memory into the FP temporary DT0, dispatching
   on the MMU index in system mode; user mode loads through the flat
   address space. */
void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);    /* doubleword alignment required */
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        DT0 = ldfq_user(addr);
        break;
    case MMU_KERNEL_IDX:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        /* unexpected index: DT0 is left unchanged */
        DPRINTF_MMU("helper_lddf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}
3653
/* Load a 128-bit quad FP value into QT0 as two 64-bit halves (upper at
   addr, lower at addr + 8), dispatching on the MMU index in system mode. */
void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    /* NOTE(review): only doubleword alignment is checked here, not the
       16-byte alignment a true quad access would need. */
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case MMU_KERNEL_IDX:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        /* unexpected index: QT0 is left unchanged */
        DPRINTF_MMU("helper_ldqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}
3689
/* Store the 128-bit quad FP temporary QT0 to memory as two 64-bit halves
   (upper at addr, lower at addr + 8), dispatching on the MMU index in
   system mode. */
void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    /* NOTE(review): only doubleword alignment is checked here, not the
       16-byte alignment a true quad access would need. */
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case MMU_USER_IDX:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case MMU_KERNEL_IDX:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case MMU_HYPV_IDX:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        /* unexpected index: store is silently dropped */
        DPRINTF_MMU("helper_stqf: need to check MMU idx %d\n", mem_idx);
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}
3725
3726 static inline void set_fsr(void)
3727 {
3728 int rnd_mode;
3729
3730 switch (env->fsr & FSR_RD_MASK) {
3731 case FSR_RD_NEAREST:
3732 rnd_mode = float_round_nearest_even;
3733 break;
3734 default:
3735 case FSR_RD_ZERO:
3736 rnd_mode = float_round_to_zero;
3737 break;
3738 case FSR_RD_POS:
3739 rnd_mode = float_round_up;
3740 break;
3741 case FSR_RD_NEG:
3742 rnd_mode = float_round_down;
3743 break;
3744 }
3745 set_float_rounding_mode(rnd_mode, &env->fp_status);
3746 }
3747
3748 void helper_ldfsr(uint32_t new_fsr)
3749 {
3750 env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
3751 set_fsr();
3752 }
3753
3754 #ifdef TARGET_SPARC64
3755 void helper_ldxfsr(uint64_t new_fsr)
3756 {
3757 env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
3758 set_fsr();
3759 }
3760 #endif
3761
/* Leave the CPU loop with a debug exception so control returns to the
   attached debugger (gdbstub). */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}
3767
3768 #ifndef TARGET_SPARC64
3769 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3770 handling ? */
3771 void helper_save(void)
3772 {
3773 uint32_t cwp;
3774
3775 cwp = cwp_dec(env->cwp - 1);
3776 if (env->wim & (1 << cwp)) {
3777 raise_exception(TT_WIN_OVF);
3778 }
3779 set_cwp(cwp);
3780 }
3781
3782 void helper_restore(void)
3783 {
3784 uint32_t cwp;
3785
3786 cwp = cwp_inc(env->cwp + 1);
3787 if (env->wim & (1 << cwp)) {
3788 raise_exception(TT_WIN_UNF);
3789 }
3790 set_cwp(cwp);
3791 }
3792
3793 void helper_wrpsr(target_ulong new_psr)
3794 {
3795 if ((new_psr & PSR_CWP) >= env->nwindows) {
3796 raise_exception(TT_ILL_INSN);
3797 } else {
3798 cpu_put_psr(env, new_psr);
3799 }
3800 }
3801
/* RDPSR: return the processor state register assembled from env state. */
target_ulong helper_rdpsr(void)
{
    return get_psr();
}
3806
3807 #else
3808 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3809 handling ? */
3810 void helper_save(void)
3811 {
3812 uint32_t cwp;
3813
3814 cwp = cwp_dec(env->cwp - 1);
3815 if (env->cansave == 0) {
3816 raise_exception(TT_SPILL | (env->otherwin != 0 ?
3817 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3818 ((env->wstate & 0x7) << 2)));
3819 } else {
3820 if (env->cleanwin - env->canrestore == 0) {
3821 // XXX Clean windows without trap
3822 raise_exception(TT_CLRWIN);
3823 } else {
3824 env->cansave--;
3825 env->canrestore++;
3826 set_cwp(cwp);
3827 }
3828 }
3829 }
3830
3831 void helper_restore(void)
3832 {
3833 uint32_t cwp;
3834
3835 cwp = cwp_inc(env->cwp + 1);
3836 if (env->canrestore == 0) {
3837 raise_exception(TT_FILL | (env->otherwin != 0 ?
3838 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
3839 ((env->wstate & 0x7) << 2)));
3840 } else {
3841 env->cansave++;
3842 env->canrestore--;
3843 set_cwp(cwp);
3844 }
3845 }
3846
/* FLUSHW: raise a spill trap unless every window except the current one
   and the overlap window is already saved (cansave == NWINDOWS - 2);
   the trap handler then spills the remaining windows. */
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}
3855
3856 void helper_saved(void)
3857 {
3858 env->cansave++;
3859 if (env->otherwin == 0)
3860 env->canrestore--;
3861 else
3862 env->otherwin--;
3863 }
3864
3865 void helper_restored(void)
3866 {
3867 env->canrestore++;
3868 if (env->cleanwin < env->nwindows - 1)
3869 env->cleanwin++;
3870 if (env->otherwin == 0)
3871 env->cansave--;
3872 else
3873 env->otherwin--;
3874 }
3875
3876 static target_ulong get_ccr(void)
3877 {
3878 target_ulong psr;
3879
3880 psr = get_psr();
3881
3882 return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
3883 }
3884
3885 target_ulong cpu_get_ccr(CPUState *env1)
3886 {
3887 CPUState *saved_env;
3888 target_ulong ret;
3889
3890 saved_env = env;
3891 env = env1;
3892 ret = get_ccr();
3893 env = saved_env;
3894 return ret;
3895 }
3896
3897 static void put_ccr(target_ulong val)
3898 {
3899 target_ulong tmp = val;
3900
3901 env->xcc = (tmp >> 4) << 20;
3902 env->psr = (tmp & 0xf) << 20;
3903 CC_OP = CC_OP_FLAGS;
3904 }
3905
3906 void cpu_put_ccr(CPUState *env1, target_ulong val)
3907 {
3908 CPUState *saved_env;
3909
3910 saved_env = env;
3911 env = env1;
3912 put_ccr(val);
3913 env = saved_env;
3914 }
3915
/* V9 numbers windows in the opposite direction of the internal V8-style
   env->cwp; convert internal -> architectural CWP. */
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}
3920
3921 target_ulong cpu_get_cwp64(CPUState *env1)
3922 {
3923 CPUState *saved_env;
3924 target_ulong ret;
3925
3926 saved_env = env;
3927 env = env1;
3928 ret = get_cwp64();
3929 env = saved_env;
3930 return ret;
3931 }
3932
3933 static void put_cwp64(int cwp)
3934 {
3935 if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3936 cwp %= env->nwindows;
3937 }
3938 set_cwp(env->nwindows - 1 - cwp);
3939 }
3940
3941 void cpu_put_cwp64(CPUState *env1, int cwp)
3942 {
3943 CPUState *saved_env;
3944
3945 saved_env = env;
3946 env = env1;
3947 put_cwp64(cwp);
3948 env = saved_env;
3949 }
3950
3951 target_ulong helper_rdccr(void)
3952 {
3953 return get_ccr();
3954 }
3955
/* WRCCR: write the packed condition-code register (xcc | icc). */
void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}
3960
3961 // CWP handling is reversed in V9, but we still use the V8 register
3962 // order.
3963 target_ulong helper_rdcwp(void)
3964 {
3965 return get_cwp64();
3966 }
3967
/* WRCWP: set the current window pointer (V9 numbering). */
void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}
3972
// This function uses non-native bit order: FROM/TO are counted from the
// most-significant bit (bit 63) downwards, FROM..TO inclusive.
#define GET_FIELD(X, FROM, TO) \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0:
// extracts bits FROM..TO inclusive, LSB-relative.
#define GET_FIELD_SP(X, FROM, TO) \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))
3980
3981 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
3982 {
3983 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
3984 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
3985 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
3986 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
3987 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
3988 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
3989 (((pixel_addr >> 55) & 1) << 4) |
3990 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
3991 GET_FIELD_SP(pixel_addr, 11, 12);
3992 }
3993
3994 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
3995 {
3996 uint64_t tmp;
3997
3998 tmp = addr + offset;
3999 env->gsr &= ~7ULL;
4000 env->gsr |= tmp & 7ULL;
4001 return tmp & ~7ULL;
4002 }
4003
4004 target_ulong helper_popc(target_ulong val)
4005 {
4006 return ctpop64(val);
4007 }
4008
4009 static inline uint64_t *get_gregset(uint32_t pstate)
4010 {
4011 switch (pstate) {
4012 default:
4013 DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
4014 pstate,
4015 (pstate & PS_IG) ? " IG" : "",
4016 (pstate & PS_MG) ? " MG" : "",
4017 (pstate & PS_AG) ? " AG" : "");
4018 /* pass through to normal set of global registers */
4019 case 0:
4020 return env->bgregs;
4021 case PS_AG:
4022 return env->agregs;
4023 case PS_MG:
4024 return env->mgregs;
4025 case PS_IG:
4026 return env->igregs;
4027 }
4028 }
4029
4030 static inline void change_pstate(uint32_t new_pstate)
4031 {
4032 uint32_t pstate_regs, new_pstate_regs;
4033 uint64_t *src, *dst;
4034
4035 if (env->def->features & CPU_FEATURE_GL) {
4036 // PS_AG is not implemented in this case
4037 new_pstate &= ~PS_AG;
4038 }
4039
4040 pstate_regs = env->pstate & 0xc01;
4041 new_pstate_regs = new_pstate & 0xc01;
4042
4043 if (new_pstate_regs != pstate_regs) {
4044 DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
4045 pstate_regs, new_pstate_regs);
4046 // Switch global register bank
4047 src = get_gregset(new_pstate_regs);
4048 dst = get_gregset(pstate_regs);
4049 memcpy32(dst, env->gregs);
4050 memcpy32(env->gregs, src);
4051 }
4052 else {
4053 DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
4054 new_pstate_regs);
4055 }
4056 env->pstate = new_pstate;
4057 }
4058
/* WRPSTATE: write PSTATE, masked to the implemented bits (0xf3f).
   May switch global register banks via change_pstate(). */
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    /* The write may have changed interrupt enablement; re-check IRQs. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4069
4070 void cpu_change_pstate(CPUState *env1, uint32_t new_pstate)
4071 {
4072 CPUState *saved_env;
4073
4074 saved_env = env;
4075 env = env1;
4076 change_pstate(new_pstate);
4077 env = saved_env;
4078 }
4079
/* WRPIL: write the processor interrupt level register and re-evaluate
   pending interrupts, since the change may affect which IRQs are
   deliverable.  No-op in user-only emulation. */
void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4093
/* DONE instruction: return from a trap handler, resuming after the
   trapped instruction (pc = TNPC), restoring saved state from TSTATE
   and popping one trap level. */
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    /* Unpack TSTATE: CCR from bits 39..32, ASI from bits 31..24,
       PSTATE from bits 19..8 (masked to 0xf3f), CWP from bits 7..0. */
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* Dropping a trap level may re-enable interrupt delivery. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4114
/* RETRY instruction: return from a trap handler and re-execute the
   trapped instruction (pc = TPC, npc = TNPC), restoring saved state
   from TSTATE and popping one trap level. */
void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    /* Unpack TSTATE: CCR from bits 39..32, ASI from bits 31..24,
       PSTATE from bits 19..8 (masked to 0xf3f), CWP from bits 7..0. */
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    /* Dropping a trap level may re-enable interrupt delivery. */
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
4135
4136 static void do_modify_softint(const char* operation, uint32_t value)
4137 {
4138 if (env->softint != value) {
4139 env->softint = value;
4140 DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
4141 #if !defined(CONFIG_USER_ONLY)
4142 if (cpu_interrupts_enabled(env)) {
4143 cpu_check_irqs(env);
4144 }
4145 #endif
4146 }
4147 }
4148
4149 void helper_set_softint(uint64_t value)
4150 {
4151 do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
4152 }
4153
4154 void helper_clear_softint(uint64_t value)
4155 {
4156 do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
4157 }
4158
4159 void helper_write_softint(uint64_t value)
4160 {
4161 do_modify_softint("helper_write_softint", (uint32_t)value);
4162 }
4163 #endif
4164
4165 #ifdef TARGET_SPARC64
/* Return the trap-state entry for the current trap level; tl is masked
   with MAXTL_MASK so an out-of-range level cannot index past ts[]. */
trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}
4170 #endif
4171
4172 #if !defined(CONFIG_USER_ONLY)
4173
4174 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4175 void *retaddr);
4176
4177 #define MMUSUFFIX _mmu
4178 #define ALIGNED_ONLY
4179
4180 #define SHIFT 0
4181 #include "softmmu_template.h"
4182
4183 #define SHIFT 1
4184 #include "softmmu_template.h"
4185
4186 #define SHIFT 2
4187 #include "softmmu_template.h"
4188
4189 #define SHIFT 3
4190 #include "softmmu_template.h"
4191
4192 /* XXX: make it generic ? */
4193 static void cpu_restore_state2(void *retaddr)
4194 {
4195 TranslationBlock *tb;
4196 unsigned long pc;
4197
4198 if (retaddr) {
4199 /* now we have a real cpu fault */
4200 pc = (unsigned long)retaddr;
4201 tb = tb_find_pc(pc);
4202 if (tb) {
4203 /* the PC is inside the translated code. It means that we have
4204 a virtual CPU fault */
4205 cpu_restore_state(tb, env, pc);
4206 }
4207 }
4208 }
4209
/* Softmmu callback for a misaligned guest access: resync guest state
   from the faulting host PC, then raise the alignment trap. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
4220
4221 /* try to fill the TLB and return an exception if error. If retaddr is
4222 NULL, it means that the function was called in C code (i.e. not
4223 from generated code or from helper.c) */
4224 /* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        /* Fault: resync guest state from retaddr, then unwind to the
           CPU loop.  NOTE(review): cpu_loop_exit() appears not to
           return, so saved_env is intentionally not restored on this
           path -- confirm against cpu-exec. */
        cpu_restore_state2(retaddr);
        cpu_loop_exit(env);
    }
    env = saved_env;
}
4242
4243 #endif /* !CONFIG_USER_ONLY */
4244
4245 #ifndef TARGET_SPARC64
4246 #if !defined(CONFIG_USER_ONLY)
/* sparc32: record an access to unassigned memory in the MMU fault
   status (mmuregs[3]) and fault address (mmuregs[4]) registers, then
   raise an access fault if the MMU is enabled and not in no-fault
   mode. */
static void do_unassigned_access(target_phys_addr_t addr, int is_write,
                                 int is_exec, int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        /* NOTE(review): FSR bit layout assumed to follow the SPARC
           reference MMU: ASI-induced (bit 16), supervisor (bit 5),
           exec (bit 6), write (bit 7), fault type in bits 4..2 --
           verify against the SRMMU spec. */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    /* Trap only if the MMU is enabled and not in no-fault mode. */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
4307 #endif
4308 #else
/* sparc64: an access to unassigned memory simply raises a code or data
   access fault; there are no sparc32-style MMU fault registers to
   update here. */
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                                 int is_asi, int size)
#else
static void do_unassigned_access(target_phys_addr_t addr, int is_write,
                                 int is_exec, int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    /* NOTE(review): if raise_exception() unwinds via longjmp (as its
       use on fault paths elsewhere suggests), this restore is never
       reached -- confirm. */
    env = saved_env;
}
4336 #endif
4337
4338
4339 #ifdef TARGET_SPARC64
/* Forward a tick-counter write to the timer model; no-op in user-only
   builds, which have no timer device. */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
4346
/* Read the tick counter from the timer model; user-only builds have no
   timer device and always read 0. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
4355
/* Forward a tick-compare (limit) write to the timer model; no-op in
   user-only builds. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
4362 #endif
4363
4364 #if !defined(CONFIG_USER_ONLY)
4365 void cpu_unassigned_access(CPUState *env1, target_phys_addr_t addr,
4366 int is_write, int is_exec, int is_asi, int size)
4367 {
4368 env = env1;
4369 do_unassigned_access(addr, is_write, is_exec, is_asi, size);
4370 }
4371 #endif