/*
 * QEMU SPARC op helpers (target-sparc/op_helper.c).
 * (Scraped gitweb page header removed; commit context: "sparc32 SuperSPARC
 * MMU Breakpoint Action register (SS-20 OBP fix)".)
 */
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4
5 //#define DEBUG_MMU
6 //#define DEBUG_MXCC
7 //#define DEBUG_UNALIGNED
8 //#define DEBUG_UNASSIGNED
9 //#define DEBUG_ASI
10 //#define DEBUG_PCALL
11 //#define DEBUG_PSTATE
12
13 #ifdef DEBUG_MMU
14 #define DPRINTF_MMU(fmt, ...) \
15 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
16 #else
17 #define DPRINTF_MMU(fmt, ...) do {} while (0)
18 #endif
19
20 #ifdef DEBUG_MXCC
21 #define DPRINTF_MXCC(fmt, ...) \
22 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
23 #else
24 #define DPRINTF_MXCC(fmt, ...) do {} while (0)
25 #endif
26
#ifdef DEBUG_ASI
#define DPRINTF_ASI(fmt, ...) \
do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
#else
/* Provide an empty fallback like the other DPRINTF_* macros do, so that
   DPRINTF_ASI() call sites still compile when DEBUG_ASI is not defined. */
#define DPRINTF_ASI(fmt, ...) do {} while (0)
#endif
31
32 #ifdef DEBUG_PSTATE
33 #define DPRINTF_PSTATE(fmt, ...) \
34 do { printf("PSTATE: " fmt , ## __VA_ARGS__); } while (0)
35 #else
36 #define DPRINTF_PSTATE(fmt, ...) do {} while (0)
37 #endif
38
39 #ifdef TARGET_SPARC64
40 #ifndef TARGET_ABI32
41 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
42 #else
43 #define AM_CHECK(env1) (1)
44 #endif
45 #endif
46
47 #define DT0 (env->dt0)
48 #define DT1 (env->dt1)
49 #define QT0 (env->qt0)
50 #define QT1 (env->qt1)
51
52 #if defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
53 static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
54 int is_asi, int size);
55 #endif
56
57 #if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
58 // Calculates TSB pointer value for fault page size 8k or 64k
/* Compute the UltraSPARC TSB pointer for a fault on an 8k or 64k page.
 * The pointer is formed by combining the TSB base (from the TSB register)
 * with index bits taken from the faulting VA (tag access register),
 * honouring the TSB size field and the optional split configuration. */
static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
                                       uint64_t tag_access_register,
                                       int page_size)
{
    const uint64_t base = tsb_register & ~0x1fffULL;
    const int split = (tsb_register & 0x1000ULL) != 0;
    const int size = tsb_register & 0xf;
    uint64_t mask = ~0x1fffULL;
    /* VA bits of the tag access register; low 13 bits hold the context. */
    uint64_t vpn = tag_access_register & ~0x1fffULL;

    /* Shift the VA so the page index lands in the pointer's index field. */
    switch (page_size) {
    case 8 * 1024:
        vpn >>= 9;
        break;
    case 64 * 1024:
        vpn >>= 12;
        break;
    default:
        break;
    }

    if (size) {
        mask <<= size;
    }

    /* With a split TSB, 8k faults index the lower half and 64k faults the
     * upper half; the base mask widens by one bit. */
    if (split) {
        if (page_size == 8 * 1024) {
            vpn &= ~(1ULL << (13 + size));
        } else if (page_size == 64 * 1024) {
            vpn |= 1ULL << (13 + size);
        }
        mask <<= 1;
    }

    return ((base & mask) | (vpn & ~mask)) & ~0xfULL;
}
97
/* Build the tag target register value from the tag access register:
 * the 13-bit context moves to bits 63:48 and the VA's high bits
 * (bit 22 upward) drop to the low end. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context = tag_access_register & 0x1fff;
    uint64_t va_high = tag_access_register >> 22;

    return (context << 48) | va_high;
}
104
/* Overwrite one TLB entry with a new tag/TTE pair.  If the old entry was
 * valid, first flush every simulated page it covered (the page range size
 * is derived from the TTE's size field in bits 62:61). */
static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUState *env1)
{
    target_ulong mask, size, va, offset;

    // flush page range if translation is valid
    if (TTE_IS_VALID(tlb->tte)) {

        mask = 0xffffffffffffe000ULL;
        mask <<= 3 * ((tlb->tte >> 61) & 3);   /* widen by page-size field */
        size = ~mask + 1;                      /* number of bytes mapped */

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(env1, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}
128
/* Invalidate matching entries in a 64-entry TLB as requested by a demap
 * operation.  demap_addr bit 6 selects demap-context (remove all non-global
 * entries whose context matches) vs demap-page (remove entries matching the
 * VA, if global or context-matching); bits 5:4 select which context register
 * supplies the comparison value. */
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char* strmmu, CPUState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    // demap context
    switch ((demap_addr >> 4) & 3) {
    case 0: // primary
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: // secondary
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: // nucleus
        context = 0;
        break;
    case 3: // reserved
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                // will remove non-global entries matching context value
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                // demap page
                // will remove any entry matching VA
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                // entry should be global or matching context value
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(env1);
#endif
        }
    }
}
188
/* Insert a TLB entry using a 1-bit pseudo-LRU policy: prefer an invalid
 * slot, otherwise an unlocked+unused slot; if none, clear all used bits and
 * retry once.  If every entry is locked, the insertion is silently dropped. */
static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
                                 uint64_t tlb_tag, uint64_t tlb_tte,
                                 const char* strmmu, CPUState *env1)
{
    unsigned int i, replace_used;

    // Try replacing invalid entry
    for (i = 0; i < 64; i++) {
        if (!TTE_IS_VALID(tlb[i].tte)) {
            replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
            dump_mmu(env1);
#endif
            return;
        }
    }

    // All entries are valid, try replacing unlocked entry

    for (replace_used = 0; replace_used < 2; ++replace_used) {

        // Used entries are not replaced on first pass

        for (i = 0; i < 64; i++) {
            if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {

                replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
#ifdef DEBUG_MMU
                DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
                            strmmu, (replace_used?"used":"unused"), i);
                dump_mmu(env1);
#endif
                return;
            }
        }

        // Now reset used bit and search for unused entries again

        for (i = 0; i < 64; i++) {
            TTE_SET_UNUSED(tlb[i].tte);
        }
    }

#ifdef DEBUG_MMU
    DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
#endif
    // error state?
}
238
239 #endif
240
/* Truncate an address to 32 bits when the sparc64 address-mask mode is
 * active (PS_AM, or always under TARGET_ABI32); a no-op on sparc32. */
static inline target_ulong address_mask(CPUState *env1, target_ulong addr)
{
#ifdef TARGET_SPARC64
    if (AM_CHECK(env1))
        addr &= 0xffffffffULL;
#endif
    return addr;
}
249
/* Record trap type tt and longjmp back to the CPU loop; does not return. */
static void raise_exception(int tt)
{
    env->exception_index = tt;
    cpu_loop_exit();
}

/* TCG-callable wrapper around raise_exception(). */
void HELPER(raise_exception)(int tt)
{
    raise_exception(tt);
}
260
/* Raise an alignment trap if any of the bits in 'align' are set in 'addr'
 * (callers pass the required alignment minus one as the mask). */
void helper_check_align(target_ulong addr, uint32_t align)
{
    if (addr & align) {
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
        raise_exception(TT_UNALIGNED);
    }
}
271
/* Declare a void FP helper named helper_f<name><p>. */
#define F_HELPER(name, p) void helper_f##name##p(void)

/* Instantiate single/double/quad variants of a binary FP operation.
 * The single variant takes/returns float32 directly; double and quad
 * operate on the DT0/DT1 and QT0/QT1 staging registers in env. */
#define F_BINOP(name)                                              \
    float32 helper_f ## name ## s (float32 src1, float32 src2)     \
    {                                                              \
        return float32_ ## name (src1, src2, &env->fp_status);     \
    }                                                              \
    F_HELPER(name, d)                                              \
    {                                                              \
        DT0 = float64_ ## name (DT0, DT1, &env->fp_status);        \
    }                                                              \
    F_HELPER(name, q)                                              \
    {                                                              \
        QT0 = float128_ ## name (QT0, QT1, &env->fp_status);       \
    }

F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP

/* FsMULd: multiply two singles with a double-precision result. */
void helper_fsmuld(float32 src1, float32 src2)
{
    DT0 = float64_mul(float32_to_float64(src1, &env->fp_status),
                      float32_to_float64(src2, &env->fp_status),
                      &env->fp_status);
}

/* FdMULq: multiply two doubles with a quad-precision result. */
void helper_fdmulq(void)
{
    QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
                       float64_to_float128(DT1, &env->fp_status),
                       &env->fp_status);
}
307
/* Negate a single-precision value (sign-bit flip only). */
float32 helper_fnegs(float32 src)
{
    return float32_chs(src);
}

#ifdef TARGET_SPARC64
/* Negate double: DT0 = -DT1. */
F_HELPER(neg, d)
{
    DT0 = float64_chs(DT1);
}

/* Negate quad: QT0 = -QT1. */
F_HELPER(neg, q)
{
    QT0 = float128_chs(QT1);
}
#endif
324
/* Integer to float conversion. */
float32 helper_fitos(int32_t src)
{
    return int32_to_float32(src, &env->fp_status);
}

void helper_fitod(int32_t src)
{
    DT0 = int32_to_float64(src, &env->fp_status);
}

void helper_fitoq(int32_t src)
{
    QT0 = int32_to_float128(src, &env->fp_status);
}

#ifdef TARGET_SPARC64
/* 64-bit integer to float: the source integer is the raw bit pattern of
 * DT1, reinterpreted via a pointer cast.
 * NOTE(review): the *(int64_t *)&DT1 casts rely on float64 sharing the
 * int64 representation — confirm against the softfloat build options. */
float32 helper_fxtos(void)
{
    return int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, d)
{
    DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}

F_HELPER(xto, q)
{
    QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#undef F_HELPER
358
/* floating point conversion */
float32 helper_fdtos(void)
{
    return float64_to_float32(DT1, &env->fp_status);
}

void helper_fstod(float32 src)
{
    DT0 = float32_to_float64(src, &env->fp_status);
}

float32 helper_fqtos(void)
{
    return float128_to_float32(QT1, &env->fp_status);
}

void helper_fstoq(float32 src)
{
    QT0 = float32_to_float128(src, &env->fp_status);
}

void helper_fqtod(void)
{
    DT0 = float128_to_float64(QT1, &env->fp_status);
}

void helper_fdtoq(void)
{
    QT0 = float64_to_float128(DT1, &env->fp_status);
}

/* Float to integer conversion (round toward zero, per SPARC F*TOi). */
int32_t helper_fstoi(float32 src)
{
    return float32_to_int32_round_to_zero(src, &env->fp_status);
}

int32_t helper_fdtoi(void)
{
    return float64_to_int32_round_to_zero(DT1, &env->fp_status);
}

int32_t helper_fqtoi(void)
{
    return float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
405
406 #ifdef TARGET_SPARC64
/* Float to 64-bit integer conversions (round toward zero); the integer
 * result is stored as the raw bit pattern of DT0. */
void helper_fstox(float32 src)
{
    *((int64_t *)&DT0) = float32_to_int64_round_to_zero(src, &env->fp_status);
}

void helper_fdtox(void)
{
    *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}

void helper_fqtox(void)
{
    *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}

/* VIS FALIGNDATA: concatenate DT0:DT1 and extract 8 bytes starting at the
 * byte offset held in GSR.align (low 3 bits of env->gsr). */
void helper_faligndata(void)
{
    uint64_t tmp;

    tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
    /* on many architectures a shift of 64 does nothing */
    if ((env->gsr & 7) != 0) {
        tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
    }
    *((uint64_t *)&DT0) = tmp;
}
433
/* VIS element accessors: index sub-elements of a 64/32-bit register in
 * target (big-endian) order regardless of host endianness. */
#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif

/* A 64-bit FP register viewed as bytes, (signed) halfwords or words. */
typedef union {
    uint8_t b[8];
    uint16_t w[4];
    int16_t sw[4];
    uint32_t l[2];
    float64 d;
} vis64;

/* A 32-bit FP register viewed as bytes or halfwords. */
typedef union {
    uint8_t b[4];
    uint16_t w[2];
    uint32_t l;
    float32 f;
} vis32;
464
/* VIS FPMERGE: interleave the four low bytes of DT0 with the four low
 * bytes of DT1 into DT0. */
void helper_fpmerge(void)
{
    vis64 s, d;

    s.d = DT0;
    d.d = DT1;

    // Reverse calculation order to handle overlap
    d.VIS_B64(7) = s.VIS_B64(3);
    d.VIS_B64(6) = d.VIS_B64(3);
    d.VIS_B64(5) = s.VIS_B64(2);
    d.VIS_B64(4) = d.VIS_B64(2);
    d.VIS_B64(3) = s.VIS_B64(1);
    d.VIS_B64(2) = d.VIS_B64(1);
    d.VIS_B64(1) = s.VIS_B64(0);
    //d.VIS_B64(0) = d.VIS_B64(0);

    DT0 = d.d;
}
484
/* VIS FMUL8x16: multiply each signed 16-bit element of DT1 by the
 * corresponding unsigned byte of DT0, rounding the 8.8 product to 16 bits. */
void helper_fmul8x16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

/* Round-to-nearest on the low 8 bits, then keep the high 16. */
#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* VIS FMUL8x16AL: like FMUL8x16 but every product uses 16-bit element 1. */
void helper_fmul8x16al(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* VIS FMUL8x16AU: like FMUL8x16 but every product uses 16-bit element 0. */
void helper_fmul8x16au(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                 \
    tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r);       \
    if ((tmp & 0xff) > 0x7f)                                    \
        tmp += 0x100;                                           \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
553
/* VIS FMUL8SUx16: multiply each 16-bit element of DT1 by the signed upper
 * byte (arithmetic >> 8) of the matching 16-bit element of DT0. */
void helper_fmul8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}

/* VIS FMUL8ULx16: multiply each 16-bit element of DT1 by the unsigned
 * lower byte of the matching 16-bit element of DT0 (byte index r*2). */
void helper_fmul8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_W64(r) = tmp >> 8;

    PMUL(0);
    PMUL(1);
    PMUL(2);
    PMUL(3);
#undef PMUL

    DT0 = d.d;
}
599
/* VIS FMULd8SUx16: like FMUL8SUx16 on the two low 16-bit elements, but
 * each product is stored as a full 32-bit result. */
void helper_fmuld8sux16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8);       \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}

/* VIS FMULd8ULx16: like FMUL8ULx16 on the two low 16-bit elements, with
 * full 32-bit results. */
void helper_fmuld8ulx16(void)
{
    vis64 s, d;
    uint32_t tmp;

    s.d = DT0;
    d.d = DT1;

#define PMUL(r)                                                         \
    tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2));        \
    if ((tmp & 0xff) > 0x7f)                                            \
        tmp += 0x100;                                                   \
    d.VIS_L64(r) = tmp;

    // Reverse calculation order to handle overlap
    PMUL(1);
    PMUL(0);
#undef PMUL

    DT0 = d.d;
}
643
/* VIS FEXPAND: widen the four bytes in the low word of DT0 into four
 * 16-bit elements (each byte shifted left by 4) stored in DT0. */
void helper_fexpand(void)
{
    vis32 s;
    vis64 d;

    s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
    d.d = DT1;
    d.VIS_W64(0) = s.VIS_B32(0) << 4;
    d.VIS_W64(1) = s.VIS_B32(1) << 4;
    d.VIS_W64(2) = s.VIS_B32(2) << 4;
    d.VIS_W64(3) = s.VIS_B32(3) << 4;

    DT0 = d.d;
}
658
/* Instantiate the four VIS partitioned arithmetic variants of F:
 * name16 (4x16 on DT0/DT1), name16s (2x16 on 32-bit operands),
 * name32 (2x32 on DT0/DT1), name32s (1x32 on 32-bit operands). */
#define VIS_HELPER(name, F)                                       \
    void name##16(void)                                           \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0));             \
        d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1));             \
        d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2));             \
        d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3));             \
                                                                  \
        DT0 = d.d;                                                \
    }                                                             \
                                                                  \
    uint32_t name##16s(uint32_t src1, uint32_t src2)              \
    {                                                             \
        vis32 s, d;                                               \
                                                                  \
        s.l = src1;                                               \
        d.l = src2;                                               \
                                                                  \
        d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0));             \
        d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1));             \
                                                                  \
        return d.l;                                               \
    }                                                             \
                                                                  \
    void name##32(void)                                           \
    {                                                             \
        vis64 s, d;                                               \
                                                                  \
        s.d = DT0;                                                \
        d.d = DT1;                                                \
                                                                  \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0));             \
        d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1));             \
                                                                  \
        DT0 = d.d;                                                \
    }                                                             \
                                                                  \
    uint32_t name##32s(uint32_t src1, uint32_t src2)              \
    {                                                             \
        vis32 s, d;                                               \
                                                                  \
        s.l = src1;                                               \
        d.l = src2;                                               \
                                                                  \
        d.l = F(d.l, s.l);                                        \
                                                                  \
        return d.l;                                               \
    }

/* Modular (wrapping) per-element add/subtract. */
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
717
/* Instantiate VIS partitioned compares: each element comparison sets one
 * bit of the result mask stored in the low element of DT0. */
#define VIS_CMPHELPER(name, F)                                        \
    void name##16(void)                                               \
    {                                                                 \
        vis64 s, d;                                                   \
                                                                      \
        s.d = DT0;                                                    \
        d.d = DT1;                                                    \
                                                                      \
        d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0;           \
        d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0;          \
        d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0;          \
        d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0;          \
                                                                      \
        DT0 = d.d;                                                    \
    }                                                                 \
                                                                      \
    void name##32(void)                                               \
    {                                                                 \
        vis64 s, d;                                                   \
                                                                      \
        s.d = DT0;                                                    \
        d.d = DT1;                                                    \
                                                                      \
        d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0;           \
        d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0;          \
                                                                      \
        DT0 = d.d;                                                    \
    }

#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))

VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
756 #endif
757
/* Fold softfloat exception flags into the FSR current-exception field;
 * trap if the corresponding trap-enable bit is set, otherwise accumulate
 * the flags into the FSR aexc field. */
void helper_check_ieee_exceptions(void)
{
    target_ulong status;

    status = get_float_exception_flags(&env->fp_status);
    if (status) {
        /* Copy IEEE 754 flags into FSR */
        if (status & float_flag_invalid)
            env->fsr |= FSR_NVC;
        if (status & float_flag_overflow)
            env->fsr |= FSR_OFC;
        if (status & float_flag_underflow)
            env->fsr |= FSR_UFC;
        if (status & float_flag_divbyzero)
            env->fsr |= FSR_DZC;
        if (status & float_flag_inexact)
            env->fsr |= FSR_NXC;

        if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
            /* Unmasked exception, generate a trap */
            env->fsr |= FSR_FTT_IEEE_EXCP;
            raise_exception(TT_FP_EXCP);
        } else {
            /* Accumulate exceptions */
            env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
        }
    }
}
786
/* Reset softfloat's accumulated exception flags before an FP op. */
void helper_clear_float_exceptions(void)
{
    set_float_exception_flags(0, &env->fp_status);
}

/* Absolute value of a single (clears the sign bit only). */
float32 helper_fabss(float32 src)
{
    return float32_abs(src);
}
796
#ifdef TARGET_SPARC64
/* Absolute value, double: DT0 = |DT1|. */
void helper_fabsd(void)
{
    DT0 = float64_abs(DT1);
}

/* Absolute value, quad: QT0 = |QT1|. */
void helper_fabsq(void)
{
    QT0 = float128_abs(QT1);
}
#endif
808
/* Square root in single, double and quad precision. */
float32 helper_fsqrts(float32 src)
{
    return float32_sqrt(src, &env->fp_status);
}

void helper_fsqrtd(void)
{
    DT0 = float64_sqrt(DT1, &env->fp_status);
}

void helper_fsqrtq(void)
{
    QT0 = float128_sqrt(QT1, &env->fp_status);
}
823
/* Generate an FP compare helper that sets the FCCn field at bit offset FS.
 * On an unordered result: trap if TRAP (FCMPE variants) or if the NV trap
 * is enabled, otherwise just accumulate NVA.  GEN_FCMP reads its operands
 * from the DT/QT staging registers; GEN_FCMPS takes float32 arguments. */
#define GEN_FCMP(name, size, reg1, reg2, FS, TRAP)                    \
    void glue(helper_, name) (void)                                   \
    {                                                                 \
        target_ulong new_fsr;                                         \
                                                                      \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                   \
        switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
        case float_relation_unordered:                                \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                    \
            if ((env->fsr & FSR_NVM) || TRAP) {                       \
                env->fsr |= new_fsr;                                  \
                env->fsr |= FSR_NVC;                                  \
                env->fsr |= FSR_FTT_IEEE_EXCP;                        \
                raise_exception(TT_FP_EXCP);                          \
            } else {                                                  \
                env->fsr |= FSR_NVA;                                  \
            }                                                         \
            break;                                                    \
        case float_relation_less:                                     \
            new_fsr = FSR_FCC0 << FS;                                 \
            break;                                                    \
        case float_relation_greater:                                  \
            new_fsr = FSR_FCC1 << FS;                                 \
            break;                                                    \
        default:                                                      \
            new_fsr = 0;                                              \
            break;                                                    \
        }                                                             \
        env->fsr |= new_fsr;                                          \
    }
#define GEN_FCMPS(name, size, FS, TRAP)                               \
    void glue(helper_, name)(float32 src1, float32 src2)              \
    {                                                                 \
        target_ulong new_fsr;                                         \
                                                                      \
        env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS);                   \
        switch (glue(size, _compare) (src1, src2, &env->fp_status)) { \
        case float_relation_unordered:                                \
            new_fsr = (FSR_FCC1 | FSR_FCC0) << FS;                    \
            if ((env->fsr & FSR_NVM) || TRAP) {                       \
                env->fsr |= new_fsr;                                  \
                env->fsr |= FSR_NVC;                                  \
                env->fsr |= FSR_FTT_IEEE_EXCP;                        \
                raise_exception(TT_FP_EXCP);                          \
            } else {                                                  \
                env->fsr |= FSR_NVA;                                  \
            }                                                         \
            break;                                                    \
        case float_relation_less:                                     \
            new_fsr = FSR_FCC0 << FS;                                 \
            break;                                                    \
        case float_relation_greater:                                  \
            new_fsr = FSR_FCC1 << FS;                                 \
            break;                                                    \
        default:                                                      \
            new_fsr = 0;                                              \
            break;                                                    \
        }                                                             \
        env->fsr |= new_fsr;                                          \
    }

GEN_FCMPS(fcmps, float32, 0, 0);
GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);

GEN_FCMPS(fcmpes, float32, 0, 1);
GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);

GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
893
/* CC_OP_FLAGS: condition codes are already materialized in env->psr. */
static uint32_t compute_all_flags(void)
{
    return env->psr & PSR_ICC;
}

static uint32_t compute_C_flags(void)
{
    return env->psr & PSR_CARRY;
}

/* N and Z flags from a 32-bit result. */
static inline uint32_t get_NZ_icc(int32_t dst)
{
    uint32_t ret = 0;

    if (dst == 0) {
        ret = PSR_ZERO;
    } else if (dst < 0) {
        ret = PSR_NEG;
    }
    return ret;
}
915
#ifdef TARGET_SPARC64
/* 64-bit (xcc) equivalents of the flag readers above. */
static uint32_t compute_all_flags_xcc(void)
{
    return env->xcc & PSR_ICC;
}

static uint32_t compute_C_flags_xcc(void)
{
    return env->xcc & PSR_CARRY;
}

/* N and Z flags from a 64-bit result. */
static inline uint32_t get_NZ_xcc(target_long dst)
{
    uint32_t ret = 0;

    if (!dst) {
        ret = PSR_ZERO;
    } else if (dst < 0) {
        ret = PSR_NEG;
    }
    return ret;
}
#endif
939
/* Division overflow flag: CC_SRC2 is nonzero when the divide overflowed. */
static inline uint32_t get_V_div_icc(target_ulong src2)
{
    uint32_t ret = 0;

    if (src2 != 0) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_div(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_V_div_icc(CC_SRC2);
    return ret;
}

/* Division never sets the carry flag. */
static uint32_t compute_C_div(void)
{
    return 0;
}
963
/* Carry out of a 32-bit add: dst wrapped below src1. */
static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Carry out of a 32-bit add-with-carry, from the sign-bit carry formula
 * (works even though the carry-in is folded into dst). */
static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow of a 32-bit add: operands share a sign that differs
 * from the result's sign. */
static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
995
#ifdef TARGET_SPARC64
/* 64-bit (xcc) versions of the add carry/overflow helpers. */
static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
{
    uint32_t ret = 0;

    if (dst < src1) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_add_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_add_xcc(CC_DST, CC_SRC);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add_xcc(void)
{
    return get_C_add_xcc(CC_DST, CC_SRC);
}
#endif
1044
/* NZVC flags for a plain 32-bit add (CC_DST = CC_SRC + CC_SRC2). */
static uint32_t compute_all_add(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_add(void)
{
    return get_C_add_icc(CC_DST, CC_SRC);
}
1059
#ifdef TARGET_SPARC64
/* NZVC flags for 64-bit add-with-carry. */
static uint32_t compute_all_addx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx_xcc(void)
{
    uint32_t ret;

    ret = get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif
1079
/* NZVC flags for 32-bit add-with-carry. */
static uint32_t compute_all_addx(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_addx(void)
{
    uint32_t ret;

    ret = get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
1097
/* Tagged-arithmetic overflow: either operand has nonzero tag bits (1:0). */
static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if ((src1 | src2) & 0x3) {
        ret = PSR_OVF;
    }
    return ret;
}

/* TADDcc: add flags plus the tag-overflow check. */
static uint32_t compute_all_tadd(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* TADDccTV: overflow would have trapped, so only NZ and C are computed. */
static uint32_t compute_all_taddtv(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_add_icc(CC_DST, CC_SRC);
    return ret;
}
1127
/* Borrow out of a 32-bit subtract. */
static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Borrow out of a 32-bit subtract-with-borrow, via the sign-bit formula. */
static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
                                      uint32_t src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
        ret = PSR_CARRY;
    }
    return ret;
}

/* Signed overflow of a 32-bit subtract: operands differ in sign and the
 * result's sign differs from src1's. */
static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
                                     uint32_t src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
        ret = PSR_OVF;
    }
    return ret;
}
1159
1160
#ifdef TARGET_SPARC64
/* 64-bit (xcc) versions of the subtract carry/overflow helpers. */
static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
{
    uint32_t ret = 0;

    if (src1 < src2) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
                                      target_ulong src2)
{
    uint32_t ret = 0;

    if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
        ret = PSR_CARRY;
    }
    return ret;
}

static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
                                     target_ulong src2)
{
    uint32_t ret = 0;

    if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
        ret = PSR_OVF;
    }
    return ret;
}

static uint32_t compute_all_sub_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub_xcc(void)
{
    return get_C_sub_xcc(CC_SRC, CC_SRC2);
}
#endif
1209
/* NZVC flags for a plain 32-bit subtract (CC_DST = CC_SRC - CC_SRC2). */
static uint32_t compute_all_sub(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_sub(void)
{
    return get_C_sub_icc(CC_SRC, CC_SRC2);
}
1224
#ifdef TARGET_SPARC64
/* NZVC flags for 64-bit subtract-with-borrow. */
static uint32_t compute_all_subx_xcc(void)
{
    uint32_t ret;

    ret = get_NZ_xcc(CC_DST);
    ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx_xcc(void)
{
    uint32_t ret;

    ret = get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
#endif
1244
/* NZVC flags for 32-bit subtract-with-borrow. */
static uint32_t compute_all_subx(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}

static uint32_t compute_C_subx(void)
{
    uint32_t ret;

    ret = get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
    return ret;
}
1262
/* TSUBcc: subtract flags plus the tag-overflow check. */
static uint32_t compute_all_tsub(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
    ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* TSUBccTV: overflow would have trapped, so only NZ and C are computed. */
static uint32_t compute_all_tsubtv(void)
{
    uint32_t ret;

    ret = get_NZ_icc(CC_DST);
    ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
    return ret;
}

/* Logic ops only set N and Z; C and V are always clear. */
static uint32_t compute_all_logic(void)
{
    return get_NZ_icc(CC_DST);
}

static uint32_t compute_C_logic(void)
{
    return 0;
}
1292
#ifdef TARGET_SPARC64
/* 64-bit logic flags: N and Z only. */
static uint32_t compute_all_logic_xcc(void)
{
    return get_NZ_xcc(CC_DST);
}
#endif

/* Lazy condition-code evaluator pair for one CC_OP value. */
typedef struct CCTable {
    uint32_t (*compute_all)(void); /* return all the flags */
    uint32_t (*compute_c)(void);  /* return the C flag */
} CCTable;
1304
/* Dispatch tables indexed by CC_OP for 32-bit (icc) and 64-bit (xcc)
 * lazy flag computation. */
static const CCTable icc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
    [CC_OP_DIV] = { compute_all_div, compute_C_div },
    [CC_OP_ADD] = { compute_all_add, compute_C_add },
    [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
    [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
    [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
    [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
    [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
    [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
    [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
    [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
};

#ifdef TARGET_SPARC64
static const CCTable xcc_table[CC_OP_NB] = {
    /* CC_OP_DYNAMIC should never happen */
    [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
    [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
    [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
    [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
    [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
    [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
    [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
};
#endif
1336
/* Materialize the lazily-computed flags into psr (and xcc on sparc64),
 * then mark the flags as up to date. */
void helper_compute_psr(void)
{
    uint32_t new_psr;

    new_psr = icc_table[CC_OP].compute_all();
    env->psr = new_psr;
#ifdef TARGET_SPARC64
    new_psr = xcc_table[CC_OP].compute_all();
    env->xcc = new_psr;
#endif
    CC_OP = CC_OP_FLAGS;
}

/* Return just the icc carry flag as 0 or 1. */
uint32_t helper_compute_C_icc(void)
{
    uint32_t ret;

    ret = icc_table[CC_OP].compute_c() >> PSR_CARRY_SHIFT;
    return ret;
}
1357
1358 static inline void memcpy32(target_ulong *dst, const target_ulong *src)
1359 {
1360 dst[0] = src[0];
1361 dst[1] = src[1];
1362 dst[2] = src[2];
1363 dst[3] = src[3];
1364 dst[4] = src[4];
1365 dst[5] = src[5];
1366 dst[6] = src[6];
1367 dst[7] = src[7];
1368 }
1369
/* Switch the current register window, shuffling the wrap-around window
 * (window nwindows-1, which aliases window -1) between its canonical slot
 * and the spare slot past the end of regbase. */
static void set_cwp(int new_cwp)
{
    /* put the modified wrap registers at their proper location */
    if (env->cwp == env->nwindows - 1) {
        memcpy32(env->regbase, env->regbase + env->nwindows * 16);
    }
    env->cwp = new_cwp;

    /* put the wrap registers at their temporary location */
    if (new_cwp == env->nwindows - 1) {
        memcpy32(env->regbase + env->nwindows * 16, env->regbase);
    }
    env->regwptr = env->regbase + (new_cwp * 16);
}

/* External entry point: run set_cwp() against an explicit CPU state by
 * temporarily swapping the global env pointer. */
void cpu_set_cwp(CPUState *env1, int new_cwp)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    set_cwp(new_cwp);
    env = saved_env;
}
1394
/* Assemble the PSR value after forcing lazy flag evaluation.  On sparc32
 * this packs version, icc, EF, PIL, S, PS, ET and CWP; on sparc64 only
 * the icc bits are meaningful here. */
static target_ulong get_psr(void)
{
    helper_compute_psr();

#if !defined (TARGET_SPARC64)
    return env->version | (env->psr & PSR_ICC) |
        (env->psref? PSR_EF : 0) |
        (env->psrpil << 8) |
        (env->psrs? PSR_S : 0) |
        (env->psrps? PSR_PS : 0) |
        (env->psret? PSR_ET : 0) | env->cwp;
#else
    return env->psr & PSR_ICC;
#endif
}

/* External entry point: read the PSR of an explicit CPU state. */
target_ulong cpu_get_psr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_psr();
    env = saved_env;
    return ret;
}
1422
/*
 * Scatter an architectural PSR value back into the CPU state fields.
 *
 * Note the deliberate ordering: psrpil is written before cpu_check_irqs()
 * so that pending interrupts are re-evaluated against the new PIL, and the
 * S/PS/ET bits and CWP are updated afterwards.  CC_OP is reset to
 * CC_OP_FLAGS because env->psr now holds the authoritative icc bits.
 */
static void put_psr(target_ulong val)
{
    env->psr = val & PSR_ICC;
#if !defined (TARGET_SPARC64)
    env->psref = (val & PSR_EF)? 1 : 0;
    env->psrpil = (val & PSR_PIL) >> 8;
#endif
#if ((!defined (TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
    /* PIL may have changed: recompute pending hardware interrupts */
    cpu_check_irqs(env);
#endif
#if !defined (TARGET_SPARC64)
    env->psrs = (val & PSR_S)? 1 : 0;
    env->psrps = (val & PSR_PS)? 1 : 0;
    env->psret = (val & PSR_ET)? 1 : 0;
    set_cwp(val & PSR_CWP);
#endif
    env->cc_op = CC_OP_FLAGS;
}
1441
1442 void cpu_put_psr(CPUState *env1, target_ulong val)
1443 {
1444 CPUState *saved_env;
1445
1446 saved_env = env;
1447 env = env1;
1448 put_psr(val);
1449 env = saved_env;
1450 }
1451
1452 static int cwp_inc(int cwp)
1453 {
1454 if (unlikely(cwp >= env->nwindows)) {
1455 cwp -= env->nwindows;
1456 }
1457 return cwp;
1458 }
1459
1460 int cpu_cwp_inc(CPUState *env1, int cwp)
1461 {
1462 CPUState *saved_env;
1463 target_ulong ret;
1464
1465 saved_env = env;
1466 env = env1;
1467 ret = cwp_inc(cwp);
1468 env = saved_env;
1469 return ret;
1470 }
1471
1472 static int cwp_dec(int cwp)
1473 {
1474 if (unlikely(cwp < 0)) {
1475 cwp += env->nwindows;
1476 }
1477 return cwp;
1478 }
1479
1480 int cpu_cwp_dec(CPUState *env1, int cwp)
1481 {
1482 CPUState *saved_env;
1483 target_ulong ret;
1484
1485 saved_env = env;
1486 env = env1;
1487 ret = cwp_dec(cwp);
1488 env = saved_env;
1489 return ret;
1490 }
1491
#ifdef TARGET_SPARC64
/* Instantiate the FP compare helpers for condition-code fields
   %fcc1..%fcc3 (sparc64 only; %fcc0 helpers are generated elsewhere).
   The numeric argument (22/24/26) is the FSR bit offset handed to the
   GEN_FCMP/GEN_FCMPS macros defined earlier in this file; the final
   0/1 argument selects the quiet vs. signaling (fcmpe*) variant per
   those macro definitions. */
GEN_FCMPS(fcmps_fcc1, float32, 22, 0);
GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);

GEN_FCMPS(fcmps_fcc2, float32, 24, 0);
GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);

GEN_FCMPS(fcmps_fcc3, float32, 26, 0);
GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);

GEN_FCMPS(fcmpes_fcc1, float32, 22, 1);
GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);

GEN_FCMPS(fcmpes_fcc2, float32, 24, 1);
GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);

GEN_FCMPS(fcmpes_fcc3, float32, 26, 1);
GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
#endif
#undef GEN_FCMPS
1518
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
    defined(DEBUG_MXCC)
/* Debug-only: print the SuperSPARC MXCC stream data and register files. */
static void dump_mxcc(CPUState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
#endif
1537
#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
    && defined(DEBUG_ASI)
/* Debug-only: trace one ASI access (txt is "read "/"write"), masking the
   printed value r1 down to the access size in bytes. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size)
    {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
#endif
1564
1565 #ifndef TARGET_SPARC64
1566 #ifndef CONFIG_USER_ONLY
/*
 * sparc32 (softmmu) load with explicit ASI.
 *
 * Dispatches on the SuperSPARC/TurboSparc ASI number: MXCC registers,
 * MMU probe/register reads, code/user/kernel space loads, physical
 * (MMU-passthrough) loads and diagnostic spaces.  The raw value is
 * sign-extended at the end when 'sign' is set.  Unhandled ASIs raise an
 * unassigned-access fault and return 0.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    uint32_t last_addr = addr;
#endif

    /* size is a power of two; size - 1 is the alignment mask */
    helper_check_align(addr, size - 1);
    switch (asi) {
    case 2: /* SuperSparc MXCC registers */
        switch (addr) {
        case 0x01c00a00: /* MXCC control register */
            if (size == 8)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00a04: /* MXCC control register */
            if (size == 4)
                ret = env->mxccregs[3];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                // should we do something here?
            } else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8)
                ret = env->mxccregs[7];
            else
                DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
                             size);
            break;
        default:
            DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
                         size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case 3: /* MMU probe */
        {
            int mmulev;

            /* probe level encoded in address bits 8..11 */
            mmulev = (addr >> 8) & 15;
            if (mmulev > 4)
                ret = 0;
            else
                ret = mmu_probe(env, addr, mmulev);
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case 4: /* read MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            else if (reg == 0x13) /* Fault status read */
                ret = env->mmuregs[3];
            else if (reg == 0x14) /* Fault address read */
                ret = env->mmuregs[4];
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case 5: // Turbosparc ITLB Diagnostic
    case 6: // Turbosparc DTLB Diagnostic
    case 7: // Turbosparc IOTLB Diagnostic
        break;
    case 9: /* Supervisor code access */
        switch(size) {
        case 1:
            ret = ldub_code(addr);
            break;
        case 2:
            ret = lduw_code(addr);
            break;
        default:
        case 4:
            ret = ldl_code(addr);
            break;
        case 8:
            ret = ldq_code(addr);
            break;
        }
        break;
    case 0xa: /* User data access */
        switch(size) {
        case 1:
            ret = ldub_user(addr);
            break;
        case 2:
            ret = lduw_user(addr);
            break;
        default:
        case 4:
            ret = ldl_user(addr);
            break;
        case 8:
            ret = ldq_user(addr);
            break;
        }
        break;
    case 0xb: /* Supervisor data access */
        switch(size) {
        case 1:
            ret = ldub_kernel(addr);
            break;
        case 2:
            ret = lduw_kernel(addr);
            break;
        default:
        case 4:
            ret = ldl_kernel(addr);
            break;
        case 8:
            ret = ldq_kernel(addr);
            break;
        }
        break;
    case 0xc: /* I-cache tag */
    case 0xd: /* I-cache data */
    case 0xe: /* D-cache tag */
    case 0xf: /* D-cache data */
        break;
    case 0x20: /* MMU passthrough */
        switch(size) {
        case 1:
            ret = ldub_phys(addr);
            break;
        case 2:
            ret = lduw_phys(addr);
            break;
        default:
        case 4:
            ret = ldl_phys(addr);
            break;
        case 8:
            ret = ldq_phys(addr);
            break;
        }
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        /* low nibble of the ASI supplies physical address bits 32..35 */
        switch(size) {
        case 1:
            ret = ldub_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys((target_phys_addr_t)addr
                            | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys((target_phys_addr_t)addr
                           | ((target_phys_addr_t)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: // Turbosparc secondary cache diagnostic
    case 0x31: // Turbosparc RAM snoop
    case 0x32: // Turbosparc page table descriptor diagnostic
    case 0x39: /* data cache diagnostic register */
    case 0x4c: /* SuperSPARC MMU Breakpoint Action register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch(reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                /* status is clear-on-read */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 8: /* User code access, XXX */
    default:
        do_unassigned_access(addr, 0, 0, asi, size);
        ret = 0;
        break;
    }
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
1800
1801 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1802 {
1803 helper_check_align(addr, size - 1);
1804 switch(asi) {
1805 case 2: /* SuperSparc MXCC registers */
1806 switch (addr) {
1807 case 0x01c00000: /* MXCC stream data register 0 */
1808 if (size == 8)
1809 env->mxccdata[0] = val;
1810 else
1811 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1812 size);
1813 break;
1814 case 0x01c00008: /* MXCC stream data register 1 */
1815 if (size == 8)
1816 env->mxccdata[1] = val;
1817 else
1818 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1819 size);
1820 break;
1821 case 0x01c00010: /* MXCC stream data register 2 */
1822 if (size == 8)
1823 env->mxccdata[2] = val;
1824 else
1825 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1826 size);
1827 break;
1828 case 0x01c00018: /* MXCC stream data register 3 */
1829 if (size == 8)
1830 env->mxccdata[3] = val;
1831 else
1832 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1833 size);
1834 break;
1835 case 0x01c00100: /* MXCC stream source */
1836 if (size == 8)
1837 env->mxccregs[0] = val;
1838 else
1839 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1840 size);
1841 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1842 0);
1843 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1844 8);
1845 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1846 16);
1847 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1848 24);
1849 break;
1850 case 0x01c00200: /* MXCC stream destination */
1851 if (size == 8)
1852 env->mxccregs[1] = val;
1853 else
1854 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1855 size);
1856 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1857 env->mxccdata[0]);
1858 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1859 env->mxccdata[1]);
1860 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1861 env->mxccdata[2]);
1862 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1863 env->mxccdata[3]);
1864 break;
1865 case 0x01c00a00: /* MXCC control register */
1866 if (size == 8)
1867 env->mxccregs[3] = val;
1868 else
1869 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1870 size);
1871 break;
1872 case 0x01c00a04: /* MXCC control register */
1873 if (size == 4)
1874 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1875 | val;
1876 else
1877 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1878 size);
1879 break;
1880 case 0x01c00e00: /* MXCC error register */
1881 // writing a 1 bit clears the error
1882 if (size == 8)
1883 env->mxccregs[6] &= ~val;
1884 else
1885 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1886 size);
1887 break;
1888 case 0x01c00f00: /* MBus port address register */
1889 if (size == 8)
1890 env->mxccregs[7] = val;
1891 else
1892 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1893 size);
1894 break;
1895 default:
1896 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1897 size);
1898 break;
1899 }
1900 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
1901 asi, size, addr, val);
1902 #ifdef DEBUG_MXCC
1903 dump_mxcc(env);
1904 #endif
1905 break;
1906 case 3: /* MMU flush */
1907 {
1908 int mmulev;
1909
1910 mmulev = (addr >> 8) & 15;
1911 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1912 switch (mmulev) {
1913 case 0: // flush page
1914 tlb_flush_page(env, addr & 0xfffff000);
1915 break;
1916 case 1: // flush segment (256k)
1917 case 2: // flush region (16M)
1918 case 3: // flush context (4G)
1919 case 4: // flush entire
1920 tlb_flush(env, 1);
1921 break;
1922 default:
1923 break;
1924 }
1925 #ifdef DEBUG_MMU
1926 dump_mmu(env);
1927 #endif
1928 }
1929 break;
1930 case 4: /* write MMU regs */
1931 {
1932 int reg = (addr >> 8) & 0x1f;
1933 uint32_t oldreg;
1934
1935 oldreg = env->mmuregs[reg];
1936 switch(reg) {
1937 case 0: // Control Register
1938 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1939 (val & 0x00ffffff);
1940 // Mappings generated during no-fault mode or MMU
1941 // disabled mode are invalid in normal mode
1942 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1943 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1944 tlb_flush(env, 1);
1945 break;
1946 case 1: // Context Table Pointer Register
1947 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1948 break;
1949 case 2: // Context Register
1950 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1951 if (oldreg != env->mmuregs[reg]) {
1952 /* we flush when the MMU context changes because
1953 QEMU has no MMU context support */
1954 tlb_flush(env, 1);
1955 }
1956 break;
1957 case 3: // Synchronous Fault Status Register with Clear
1958 case 4: // Synchronous Fault Address Register
1959 break;
1960 case 0x10: // TLB Replacement Control Register
1961 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1962 break;
1963 case 0x13: // Synchronous Fault Status Register with Read and Clear
1964 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1965 break;
1966 case 0x14: // Synchronous Fault Address Register
1967 env->mmuregs[4] = val;
1968 break;
1969 default:
1970 env->mmuregs[reg] = val;
1971 break;
1972 }
1973 if (oldreg != env->mmuregs[reg]) {
1974 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1975 reg, oldreg, env->mmuregs[reg]);
1976 }
1977 #ifdef DEBUG_MMU
1978 dump_mmu(env);
1979 #endif
1980 }
1981 break;
1982 case 5: // Turbosparc ITLB Diagnostic
1983 case 6: // Turbosparc DTLB Diagnostic
1984 case 7: // Turbosparc IOTLB Diagnostic
1985 break;
1986 case 0xa: /* User data access */
1987 switch(size) {
1988 case 1:
1989 stb_user(addr, val);
1990 break;
1991 case 2:
1992 stw_user(addr, val);
1993 break;
1994 default:
1995 case 4:
1996 stl_user(addr, val);
1997 break;
1998 case 8:
1999 stq_user(addr, val);
2000 break;
2001 }
2002 break;
2003 case 0xb: /* Supervisor data access */
2004 switch(size) {
2005 case 1:
2006 stb_kernel(addr, val);
2007 break;
2008 case 2:
2009 stw_kernel(addr, val);
2010 break;
2011 default:
2012 case 4:
2013 stl_kernel(addr, val);
2014 break;
2015 case 8:
2016 stq_kernel(addr, val);
2017 break;
2018 }
2019 break;
2020 case 0xc: /* I-cache tag */
2021 case 0xd: /* I-cache data */
2022 case 0xe: /* D-cache tag */
2023 case 0xf: /* D-cache data */
2024 case 0x10: /* I/D-cache flush page */
2025 case 0x11: /* I/D-cache flush segment */
2026 case 0x12: /* I/D-cache flush region */
2027 case 0x13: /* I/D-cache flush context */
2028 case 0x14: /* I/D-cache flush user */
2029 break;
2030 case 0x17: /* Block copy, sta access */
2031 {
2032 // val = src
2033 // addr = dst
2034 // copy 32 bytes
2035 unsigned int i;
2036 uint32_t src = val & ~3, dst = addr & ~3, temp;
2037
2038 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
2039 temp = ldl_kernel(src);
2040 stl_kernel(dst, temp);
2041 }
2042 }
2043 break;
2044 case 0x1f: /* Block fill, stda access */
2045 {
2046 // addr = dst
2047 // fill 32 bytes with val
2048 unsigned int i;
2049 uint32_t dst = addr & 7;
2050
2051 for (i = 0; i < 32; i += 8, dst += 8)
2052 stq_kernel(dst, val);
2053 }
2054 break;
2055 case 0x20: /* MMU passthrough */
2056 {
2057 switch(size) {
2058 case 1:
2059 stb_phys(addr, val);
2060 break;
2061 case 2:
2062 stw_phys(addr, val);
2063 break;
2064 case 4:
2065 default:
2066 stl_phys(addr, val);
2067 break;
2068 case 8:
2069 stq_phys(addr, val);
2070 break;
2071 }
2072 }
2073 break;
2074 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
2075 {
2076 switch(size) {
2077 case 1:
2078 stb_phys((target_phys_addr_t)addr
2079 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2080 break;
2081 case 2:
2082 stw_phys((target_phys_addr_t)addr
2083 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2084 break;
2085 case 4:
2086 default:
2087 stl_phys((target_phys_addr_t)addr
2088 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2089 break;
2090 case 8:
2091 stq_phys((target_phys_addr_t)addr
2092 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
2093 break;
2094 }
2095 }
2096 break;
2097 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
2098 case 0x31: // store buffer data, Ross RT620 I-cache flush or
2099 // Turbosparc snoop RAM
2100 case 0x32: // store buffer control or Turbosparc page table
2101 // descriptor diagnostic
2102 case 0x36: /* I-cache flash clear */
2103 case 0x37: /* D-cache flash clear */
2104 case 0x4c: /* breakpoint action */
2105 break;
2106 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
2107 {
2108 int reg = (addr >> 8) & 3;
2109
2110 switch(reg) {
2111 case 0: /* Breakpoint Value (Addr) */
2112 env->mmubpregs[reg] = (val & 0xfffffffffULL);
2113 break;
2114 case 1: /* Breakpoint Mask */
2115 env->mmubpregs[reg] = (val & 0xfffffffffULL);
2116 break;
2117 case 2: /* Breakpoint Control */
2118 env->mmubpregs[reg] = (val & 0x7fULL);
2119 break;
2120 case 3: /* Breakpoint Status */
2121 env->mmubpregs[reg] = (val & 0xfULL);
2122 break;
2123 }
2124 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
2125 env->mmuregs[reg]);
2126 }
2127 break;
2128 case 8: /* User code access, XXX */
2129 case 9: /* Supervisor code access, XXX */
2130 default:
2131 do_unassigned_access(addr, 1, 0, asi, size);
2132 break;
2133 }
2134 #ifdef DEBUG_ASI
2135 dump_asi("write", addr, asi, size, val);
2136 #endif
2137 }
2138
2139 #endif /* CONFIG_USER_ONLY */
2140 #else /* TARGET_SPARC64 */
2141
2142 #ifdef CONFIG_USER_ONLY
/*
 * sparc64 user-mode load with explicit ASI.
 *
 * Only the non-privileged ASIs (>= 0x80) are legal here; anything lower
 * raises a privileged-action trap.  No-fault ASIs return 0 instead of
 * faulting when the page is unreadable.  LE-suffixed ASIs byte-swap the
 * loaded value, and 'sign' selects sign extension of the result.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    /* apply the 32-bit address mask when PSTATE.AM is set */
    addr = address_mask(env, addr);

    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
        /* no-fault: silently return 0 if the page is not readable */
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                ret = ldub_raw(addr);
                break;
            case 2:
                ret = lduw_raw(addr);
                break;
            case 4:
                ret = ldl_raw(addr);
                break;
            default:
            case 8:
                ret = ldq_raw(addr);
                break;
            }
        }
        break;
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        if (page_check_range(addr, size, PAGE_READ) == -1) {
#ifdef DEBUG_ASI
            dump_asi("read ", last_addr, asi, size, ret);
#endif
            return 0;
        }
        // Fall through
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        break;
    default:
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
        /* intentional fallthrough to default (nothing further to do) */
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2247
/*
 * sparc64 user-mode store with explicit ASI.
 *
 * Only non-privileged ASIs (>= 0x80) are legal; LE-suffixed ASIs
 * byte-swap the value before storing.  No-fault ASIs are read-only, so
 * stores to them (and any other unhandled ASI) raise an
 * unassigned-access fault.
 */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80)
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    /* apply the 32-bit address mask when PSTATE.AM is set */
    addr = address_mask(env, addr);

    /* Convert to little endian */
    switch (asi) {
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
        /* intentional fallthrough to default (nothing further to do) */
    default:
        break;
    }

    switch(asi) {
    case 0x80: // Primary
    case 0x88: // Primary LE
        {
            switch(size) {
            case 1:
                stb_raw(addr, val);
                break;
            case 2:
                stw_raw(addr, val);
                break;
            case 4:
                stl_raw(addr, val);
                break;
            case 8:
            default:
                stq_raw(addr, val);
                break;
            }
        }
        break;
    case 0x81: // Secondary
    case 0x89: // Secondary LE
        // XXX
        return;

    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
2315
2316 #else /* CONFIG_USER_ONLY */
2317
/*
 * sparc64 (softmmu) load with explicit ASI.
 *
 * Enforces the privilege rules (ASIs < 0x80 need PS_PRIV; with a
 * hypervisor, 0x30..0x7f additionally need HS_PRIV), then dispatches:
 * translating loads through the primary/secondary/nucleus MMU contexts,
 * bypass (physical) loads, and the UltraSPARC I/D-MMU register, TSB
 * pointer and TLB diagnostic spaces.  LE-suffixed ASIs byte-swap the
 * result and 'sign' selects sign extension.
 */
uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
{
    uint64_t ret = 0;
#if defined(DEBUG_ASI)
    target_ulong last_addr = addr;
#endif

    asi &= 0xff;

    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    switch (asi) {
    case 0x82: // Primary no-fault
    case 0x8a: // Primary no-fault LE
    case 0x83: // Secondary no-fault
    case 0x8b: // Secondary no-fault LE
        {
            /* secondary space access has lowest asi bit equal to 1 */
            /* NOTE(review): this ternary looks inverted relative to the
               comment above — asi & 1 (secondary) selects MMU_KERNEL_IDX
               while primary gets MMU_KERNEL_SECONDARY_IDX; verify. */
            int access_mmu_idx = ( asi & 1 ) ? MMU_KERNEL_IDX
                                             : MMU_KERNEL_SECONDARY_IDX;

            /* no-fault: return 0 if the address does not translate */
            if (cpu_get_phys_page_nofault(env, addr, access_mmu_idx) == -1ULL) {
#ifdef DEBUG_ASI
                dump_asi("read ", last_addr, asi, size, ret);
#endif
                return 0;
            }
        }
        // Fall through
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    ret = ldub_hypv(addr);
                    break;
                case 2:
                    ret = lduw_hypv(addr);
                    break;
                case 4:
                    ret = ldl_hypv(addr);
                    break;
                default:
                case 8:
                    ret = ldq_hypv(addr);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel_secondary(addr);
                        break;
                    case 2:
                        ret = lduw_kernel_secondary(addr);
                        break;
                    case 4:
                        ret = ldl_kernel_secondary(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel_secondary(addr);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        ret = ldub_kernel(addr);
                        break;
                    case 2:
                        ret = lduw_kernel(addr);
                        break;
                    case 4:
                        ret = ldl_kernel(addr);
                        break;
                    default:
                    case 8:
                        ret = ldq_kernel(addr);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    ret = ldub_user_secondary(addr);
                    break;
                case 2:
                    ret = lduw_user_secondary(addr);
                    break;
                case 4:
                    ret = ldl_user_secondary(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user_secondary(addr);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    ret = ldub_user(addr);
                    break;
                case 2:
                    ret = lduw_user(addr);
                    break;
                case 4:
                    ret = ldl_user(addr);
                    break;
                default:
                case 8:
                    ret = ldq_user(addr);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        {
            /* bypass: addr is a physical address, no MMU translation */
            switch(size) {
            case 1:
                ret = ldub_phys(addr);
                break;
            case 2:
                ret = lduw_phys(addr);
                break;
            case 4:
                ret = ldl_phys(addr);
                break;
            default:
            case 8:
                ret = ldq_phys(addr);
                break;
            }
            break;
        }
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        // Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return 0;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
        {
            switch(size) {
            case 1:
                ret = ldub_nucleus(addr);
                break;
            case 2:
                ret = lduw_nucleus(addr);
                break;
            case 4:
                ret = ldl_nucleus(addr);
                break;
            default:
            case 8:
                ret = ldq_nucleus(addr);
                break;
            }
            break;
        }
    case 0x4a: // UPA config
        // XXX
        break;
    case 0x45: // LSU
        ret = env->lsu;
        break;
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // I-TSB Tag Target register
                ret = ultrasparc_tag_target(env->immu.tag_access);
            } else {
                ret = env->immuregs[reg];
            }

            break;
        }
    case 0x51: // I-MMU 8k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         8*1024);
            break;
        }
    case 0x52: // I-MMU 64k TSB pointer
        {
            // env->immuregs[5] holds I-MMU TSB register value
            // env->immuregs[6] holds I-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
                                         64*1024);
            break;
        }
    case 0x55: // I-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tte;
            break;
        }
    case 0x56: // I-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->itlb[reg].tag;
            break;
        }
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;

            if (reg == 0) {
                // D-TSB Tag Target register
                ret = ultrasparc_tag_target(env->dmmu.tag_access);
            } else {
                ret = env->dmmuregs[reg];
            }
            break;
        }
    case 0x59: // D-MMU 8k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         8*1024);
            break;
        }
    case 0x5a: // D-MMU 64k TSB pointer
        {
            // env->dmmuregs[5] holds D-MMU TSB register value
            // env->dmmuregs[6] holds D-MMU Tag Access register value
            ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
                                         64*1024);
            break;
        }
    case 0x5d: // D-MMU data access
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tte;
            break;
        }
    case 0x5e: // D-MMU tag read
        {
            int reg = (addr >> 3) & 0x3f;

            ret = env->dtlb[reg].tag;
            break;
        }
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        break;
    case 0x5b: // D-MMU data pointer
    case 0x48: // Interrupt dispatch, RO
    case 0x49: // Interrupt data receive
    case 0x7f: // Incoming interrupt vector, RO
        // XXX
        break;
    case 0x54: // I-MMU data in, WO
    case 0x57: // I-MMU demap, WO
    case 0x5c: // D-MMU data in, WO
    case 0x5f: // D-MMU demap, WO
    case 0x77: // Interrupt vector, WO
    default:
        do_unassigned_access(addr, 0, 0, 1, size);
        ret = 0;
        break;
    }

    /* Convert from little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0x8a: // Primary no-fault LE
    case 0x8b: // Secondary no-fault LE
        switch(size) {
        case 2:
            ret = bswap16(ret);
            break;
        case 4:
            ret = bswap32(ret);
            break;
        case 8:
            ret = bswap64(ret);
            break;
        default:
            break;
        }
        /* intentional fallthrough to default (nothing further to do) */
    default:
        break;
    }

    /* Convert to signed number */
    if (sign) {
        switch(size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
2669
/* Store to an alternate address space (V9 STXA and friends).
 *
 * addr/val: virtual address and data; asi: 8-bit address-space identifier;
 * size: access width in bytes (1/2/4/8).  Raises TT_PRIV_ACT when a
 * restricted ASI is used without the required privilege, and delegates
 * alignment checking to helper_check_align.  MMU-register ASIs update the
 * I/D-MMU state directly; read-only and unknown ASIs fault through
 * do_unassigned_access. */
void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
{
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif

    asi &= 0xff;

    /* ASIs < 0x80 are privileged; 0x30-0x7f additionally require
       hypervisor privilege when the CPU has a hypervisor. */
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    helper_check_align(addr, size - 1);
    /* Convert to little endian */
    switch (asi) {
    case 0x0c: // Nucleus Little Endian (LE)
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
        switch(size) {
        case 2:
            val = bswap16(val);
            break;
        case 4:
            val = bswap32(val);
            break;
        case 8:
            val = bswap64(val);
            break;
        default:
            break;
        }
    default:
        break;
    }

    switch(asi) {
    case 0x10: // As if user primary
    case 0x11: // As if user secondary
    case 0x18: // As if user primary LE
    case 0x19: // As if user secondary LE
    case 0x80: // Primary
    case 0x81: // Secondary
    case 0x88: // Primary LE
    case 0x89: // Secondary LE
    case 0xe2: // UA2007 Primary block init
    case 0xe3: // UA2007 Secondary block init
        /* Translated stores: pick the MMU context (hypervisor, kernel or
           user; primary or secondary) from the privilege level and the
           low ASI bit. */
        if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
            if (cpu_hypervisor_mode(env)) {
                switch(size) {
                case 1:
                    stb_hypv(addr, val);
                    break;
                case 2:
                    stw_hypv(addr, val);
                    break;
                case 4:
                    stl_hypv(addr, val);
                    break;
                case 8:
                default:
                    stq_hypv(addr, val);
                    break;
                }
            } else {
                /* secondary space access has lowest asi bit equal to 1 */
                if (asi & 1) {
                    switch(size) {
                    case 1:
                        stb_kernel_secondary(addr, val);
                        break;
                    case 2:
                        stw_kernel_secondary(addr, val);
                        break;
                    case 4:
                        stl_kernel_secondary(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel_secondary(addr, val);
                        break;
                    }
                } else {
                    switch(size) {
                    case 1:
                        stb_kernel(addr, val);
                        break;
                    case 2:
                        stw_kernel(addr, val);
                        break;
                    case 4:
                        stl_kernel(addr, val);
                        break;
                    case 8:
                    default:
                        stq_kernel(addr, val);
                        break;
                    }
                }
            }
        } else {
            /* secondary space access has lowest asi bit equal to 1 */
            if (asi & 1) {
                switch(size) {
                case 1:
                    stb_user_secondary(addr, val);
                    break;
                case 2:
                    stw_user_secondary(addr, val);
                    break;
                case 4:
                    stl_user_secondary(addr, val);
                    break;
                case 8:
                default:
                    stq_user_secondary(addr, val);
                    break;
                }
            } else {
                switch(size) {
                case 1:
                    stb_user(addr, val);
                    break;
                case 2:
                    stw_user(addr, val);
                    break;
                case 4:
                    stl_user(addr, val);
                    break;
                case 8:
                default:
                    stq_user(addr, val);
                    break;
                }
            }
        }
        break;
    case 0x14: // Bypass
    case 0x15: // Bypass, non-cacheable
    case 0x1c: // Bypass LE
    case 0x1d: // Bypass, non-cacheable LE
        /* MMU-bypass stores go straight to physical memory. */
        {
            switch(size) {
            case 1:
                stb_phys(addr, val);
                break;
            case 2:
                stw_phys(addr, val);
                break;
            case 4:
                stl_phys(addr, val);
                break;
            case 8:
            default:
                stq_phys(addr, val);
                break;
            }
        }
        return;
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        // Only ldda allowed
        raise_exception(TT_ILL_INSN);
        return;
    case 0x04: // Nucleus
    case 0x0c: // Nucleus Little Endian (LE)
        {
            switch(size) {
            case 1:
                stb_nucleus(addr, val);
                break;
            case 2:
                stw_nucleus(addr, val);
                break;
            case 4:
                stl_nucleus(addr, val);
                break;
            default:
            case 8:
                stq_nucleus(addr, val);
                break;
            }
            break;
        }

    case 0x4a: // UPA config
        // XXX
        return;
    case 0x45: // LSU
        {
            uint64_t oldreg;

            oldreg = env->lsu;
            /* Only the D-MMU and I-MMU enable bits are writable. */
            env->lsu = val & (DMMU_E | IMMU_E);
            // Mappings generated during D/I MMU disabled mode are
            // invalid in normal mode
            if (oldreg != env->lsu) {
                DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
                            oldreg, env->lsu);
#ifdef DEBUG_MMU
                dump_mmu(env);
#endif
                tlb_flush(env, 1);
            }
            return;
        }
    case 0x50: // I-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->immuregs[reg];
            switch(reg) {
            case 0: // RO
                return;
            case 1: // Not in I-MMU
            case 2:
                return;
            case 3: // SFSR
                /* Writing with bit 0 clear clears the whole SFSR. */
                if ((val & 1) == 0)
                    val = 0; // Clear SFSR
                env->immu.sfsr = val;
                break;
            case 4: // RO
                return;
            case 5: // TSB access
                DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->immu.tsb, val);
                env->immu.tsb = val;
                break;
            case 6: // Tag access
                env->immu.tag_access = val;
                break;
            case 7:
            case 8:
                return;
            default:
                break;
            }

            if (oldreg != env->immuregs[reg]) {
                DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x54: // I-MMU data in
        replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
        return;
    case 0x55: // I-MMU data access
        {
            // TODO: auto demap

            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x57: // I-MMU demap
        demap_tlb(env->itlb, addr, "immu", env);
        return;
    case 0x58: // D-MMU regs
        {
            int reg = (addr >> 3) & 0xf;
            uint64_t oldreg;

            oldreg = env->dmmuregs[reg];
            switch(reg) {
            case 0: // RO
            case 4:
                return;
            case 3: // SFSR
                /* Writing with bit 0 clear also clears the fault address. */
                if ((val & 1) == 0) {
                    val = 0; // Clear SFSR, Fault address
                    env->dmmu.sfar = 0;
                }
                env->dmmu.sfsr = val;
                break;
            case 1: // Primary context
                env->dmmu.mmu_primary_context = val;
                /* can be optimized to only flush MMU_USER_IDX
                   and MMU_KERNEL_IDX entries */
                tlb_flush(env, 1);
                break;
            case 2: // Secondary context
                env->dmmu.mmu_secondary_context = val;
                /* can be optimized to only flush MMU_USER_SECONDARY_IDX
                   and MMU_KERNEL_SECONDARY_IDX entries */
                tlb_flush(env, 1);
                break;
            case 5: // TSB access
                DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", env->dmmu.tsb, val);
                env->dmmu.tsb = val;
                break;
            case 6: // Tag access
                env->dmmu.tag_access = val;
                break;
            case 7: // Virtual Watchpoint
            case 8: // Physical Watchpoint
            default:
                env->dmmuregs[reg] = val;
                break;
            }

            if (oldreg != env->dmmuregs[reg]) {
                DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
                            PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
            }
#ifdef DEBUG_MMU
            dump_mmu(env);
#endif
            return;
        }
    case 0x5c: // D-MMU data in
        replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
        return;
    case 0x5d: // D-MMU data access
        {
            unsigned int i = (addr >> 3) & 0x3f;

            replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);

#ifdef DEBUG_MMU
            DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
            dump_mmu(env);
#endif
            return;
        }
    case 0x5f: // D-MMU demap
        demap_tlb(env->dtlb, addr, "dmmu", env);
        return;
    case 0x49: // Interrupt data receive
        // XXX
        return;
    case 0x46: // D-cache data
    case 0x47: // D-cache tag access
    case 0x4b: // E-cache error enable
    case 0x4c: // E-cache asynchronous fault status
    case 0x4d: // E-cache asynchronous fault address
    case 0x4e: // E-cache tag data
    case 0x66: // I-cache instruction access
    case 0x67: // I-cache tag access
    case 0x6e: // I-cache predecode
    case 0x6f: // I-cache LRU etc.
    case 0x76: // E-cache tag
    case 0x7e: // E-cache tag
        /* Cache-control ASIs: writes are accepted and ignored. */
        return;
    case 0x51: // I-MMU 8k TSB pointer, RO
    case 0x52: // I-MMU 64k TSB pointer, RO
    case 0x56: // I-MMU tag read, RO
    case 0x59: // D-MMU 8k TSB pointer, RO
    case 0x5a: // D-MMU 64k TSB pointer, RO
    case 0x5b: // D-MMU data pointer, RO
    case 0x5e: // D-MMU tag read, RO
    case 0x48: // Interrupt dispatch, RO
    case 0x7f: // Incoming interrupt vector, RO
    case 0x82: // Primary no-fault, RO
    case 0x83: // Secondary no-fault, RO
    case 0x8a: // Primary no-fault LE, RO
    case 0x8b: // Secondary no-fault LE, RO
    default:
        do_unassigned_access(addr, 1, 0, 1, size);
        return;
    }
}
3049 #endif /* CONFIG_USER_ONLY */
3050
/* LDDA: load doubleword (two 32-bit halves) or, for ASIs 0x24/0x2c, an
 * atomic 128-bit quad, into register pair rd/rd+1.  rd == 0 writes only
 * %g1 (the %g0 half is discarded); rd < 8 targets the globals, otherwise
 * the current window via regwptr.  Raises TT_PRIV_ACT on insufficient
 * privilege. */
void helper_ldda_asi(target_ulong addr, int asi, int rd)
{
    if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
        || (cpu_has_hypervisor(env)
            && asi >= 0x30 && asi < 0x80
            && !(env->hpstate & HS_PRIV)))
        raise_exception(TT_PRIV_ACT);

    switch (asi) {
#if !defined(CONFIG_USER_ONLY)
    case 0x24: // Nucleus quad LDD 128 bit atomic
    case 0x2c: // Nucleus quad LDD 128 bit atomic LE
        /* Quad access must be 16-byte aligned; 0x2c additionally
           byte-swaps each 64-bit half. */
        helper_check_align(addr, 0xf);
        if (rd == 0) {
            env->gregs[1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c)
                bswap64s(&env->gregs[1]);
        } else if (rd < 8) {
            env->gregs[rd] = ldq_nucleus(addr);
            env->gregs[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->gregs[rd]);
                bswap64s(&env->gregs[rd + 1]);
            }
        } else {
            env->regwptr[rd] = ldq_nucleus(addr);
            env->regwptr[rd + 1] = ldq_nucleus(addr + 8);
            if (asi == 0x2c) {
                bswap64s(&env->regwptr[rd]);
                bswap64s(&env->regwptr[rd + 1]);
            }
        }
        break;
#endif
    default:
        /* Ordinary LDDA: two 32-bit loads through the generic ASI path. */
        helper_check_align(addr, 0x3);
        if (rd == 0)
            env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
        else if (rd < 8) {
            env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        } else {
            env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
            env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
        }
        break;
    }
}
3099
/* Floating-point load with ASI.  Block-load ASIs (0xf0/0xf1/0xf8/0xf9)
 * transfer 64 bytes into 16 consecutive FP registers (rd must be
 * 8-aligned, address 64-byte aligned); other ASIs load one value of
 * `size` bytes into fpr[rd] / DT0.  128-bit loads are not implemented. */
void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xf0: // Block load primary
    case 0xf1: // Block load secondary
    case 0xf8: // Block load primary LE
    case 0xf9: // Block load secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x8f maps the block-load ASI onto the corresponding
           normal (LE-aware) primary/secondary ASI. */
        for (i = 0; i < 16; i++) {
            *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
                                                         0);
            addr += 4;
        }

        return;
    default:
        break;
    }

    val = helper_ld_asi(addr, asi, size, 0);
    switch(size) {
    default:
    case 4:
        *((uint32_t *)&env->fpr[rd]) = val;
        break;
    case 8:
        *((int64_t *)&DT0) = val;
        break;
    case 16:
        // XXX
        break;
    }
}
3141
/* Floating-point store with ASI; mirror of helper_ldf_asi.  Block-store
 * ASIs (including the UA2007 commit variants 0xe0/0xe1) write 16 FP
 * registers as 64 bytes; other ASIs store one value of `size` bytes from
 * fpr[rd] / DT0.  128-bit stores are not implemented. */
void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
{
    unsigned int i;
    target_ulong val = 0;

    helper_check_align(addr, 3);
    switch (asi) {
    case 0xe0: // UA2007 Block commit store primary (cache flush)
    case 0xe1: // UA2007 Block commit store secondary (cache flush)
    case 0xf0: // Block store primary
    case 0xf1: // Block store secondary
    case 0xf8: // Block store primary LE
    case 0xf9: // Block store secondary LE
        if (rd & 7) {
            raise_exception(TT_ILL_INSN);
            return;
        }
        helper_check_align(addr, 0x3f);
        /* asi & 0x8f maps the block ASI onto the normal primary/secondary
           (LE-aware) ASI for the individual 4-byte stores. */
        for (i = 0; i < 16; i++) {
            val = *(uint32_t *)&env->fpr[rd++];
            helper_st_asi(addr, val, asi & 0x8f, 4);
            addr += 4;
        }

        return;
    default:
        break;
    }

    switch(size) {
    default:
    case 4:
        val = *((uint32_t *)&env->fpr[rd]);
        break;
    case 8:
        val = *((int64_t *)&DT0);
        break;
    case 16:
        // XXX
        break;
    }
    helper_st_asi(addr, val, asi, size);
}
3185
3186 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
3187 target_ulong val2, uint32_t asi)
3188 {
3189 target_ulong ret;
3190
3191 val2 &= 0xffffffffUL;
3192 ret = helper_ld_asi(addr, asi, 4, 0);
3193 ret &= 0xffffffffUL;
3194 if (val2 == ret)
3195 helper_st_asi(addr, val1 & 0xffffffffUL, asi, 4);
3196 return ret;
3197 }
3198
3199 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
3200 target_ulong val2, uint32_t asi)
3201 {
3202 target_ulong ret;
3203
3204 ret = helper_ld_asi(addr, asi, 8, 0);
3205 if (val2 == ret)
3206 helper_st_asi(addr, val1, asi, 8);
3207 return ret;
3208 }
3209 #endif /* TARGET_SPARC64 */
3210
3211 #ifndef TARGET_SPARC64
/* V8 RETT: return from trap.  Illegal if traps are already enabled
 * (psret == 1); otherwise re-enables traps, restores the previous window
 * (trapping on underflow) and the previous supervisor bit. */
void helper_rett(void)
{
    unsigned int cwp;

    if (env->psret == 1)
        raise_exception(TT_ILL_INSN);

    env->psret = 1;
    cwp = cwp_inc(env->cwp + 1) ;
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
    env->psrs = env->psrps;
}
3227 #endif
3228
/* UDIV: unsigned 64/32 divide of (Y:a.lo) by b.lo.  Traps on divide by
 * zero; a quotient above 32 bits saturates to 0xffffffff.  cc_src2
 * records the overflow condition for the flag-setting variant. */
target_ulong helper_udiv(target_ulong a, target_ulong b)
{
    uint64_t x0;
    uint32_t x1;

    /* Dividend is the Y register in the high 32 bits over a's low word. */
    x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
    x1 = b;

    if (x1 == 0) {
        raise_exception(TT_DIV_ZERO);
    }

    x0 = x0 / x1;
    if (x0 > 0xffffffff) {
        env->cc_src2 = 1;
        return 0xffffffff;
    } else {
        env->cc_src2 = 0;
        return x0;
    }
}
3250
3251 target_ulong helper_sdiv(target_ulong a, target_ulong b)
3252 {
3253 int64_t x0;
3254 int32_t x1;
3255
3256 x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
3257 x1 = b;
3258
3259 if (x1 == 0) {
3260 raise_exception(TT_DIV_ZERO);
3261 }
3262
3263 x0 = x0 / x1;
3264 if ((int32_t) x0 != x0) {
3265 env->cc_src2 = 1;
3266 return x0 < 0? 0x80000000: 0x7fffffff;
3267 } else {
3268 env->cc_src2 = 0;
3269 return x0;
3270 }
3271 }
3272
/* Store the 64-bit FP register DT0 to memory, selecting the MMU context
 * (user/kernel/hypervisor) by mem_idx.  8-byte alignment is required. */
void helper_stdf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        stfq_user(addr, DT0);
        break;
    case 1:
        stfq_kernel(addr, DT0);
        break;
#ifdef TARGET_SPARC64
    case 2:
        stfq_hypv(addr, DT0);
        break;
#endif
    default:
        break;
    }
#else
    /* User-mode emulation: single address space, possibly AM-masked. */
    stfq_raw(address_mask(env, addr), DT0);
#endif
}

/* Load a 64-bit value from memory into the FP register DT0, selecting the
 * MMU context by mem_idx.  8-byte alignment is required. */
void helper_lddf(target_ulong addr, int mem_idx)
{
    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        DT0 = ldfq_user(addr);
        break;
    case 1:
        DT0 = ldfq_kernel(addr);
        break;
#ifdef TARGET_SPARC64
    case 2:
        DT0 = ldfq_hypv(addr);
        break;
#endif
    default:
        break;
    }
#else
    DT0 = ldfq_raw(address_mask(env, addr));
#endif
}
3320
/* Load a 128-bit quad FP value into QT0 as two 64-bit halves (upper at
 * addr, lower at addr + 8), selecting the MMU context by mem_idx. */
void helper_ldqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit load
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.ll.upper = ldq_user(addr);
        u.ll.lower = ldq_user(addr + 8);
        QT0 = u.q;
        break;
    case 1:
        u.ll.upper = ldq_kernel(addr);
        u.ll.lower = ldq_kernel(addr + 8);
        QT0 = u.q;
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.ll.upper = ldq_hypv(addr);
        u.ll.lower = ldq_hypv(addr + 8);
        QT0 = u.q;
        break;
#endif
    default:
        break;
    }
#else
    u.ll.upper = ldq_raw(address_mask(env, addr));
    u.ll.lower = ldq_raw(address_mask(env, addr + 8));
    QT0 = u.q;
#endif
}

/* Store the 128-bit quad FP value QT0 as two 64-bit halves (upper at
 * addr, lower at addr + 8), selecting the MMU context by mem_idx. */
void helper_stqf(target_ulong addr, int mem_idx)
{
    // XXX add 128 bit store
    CPU_QuadU u;

    helper_check_align(addr, 7);
#if !defined(CONFIG_USER_ONLY)
    switch (mem_idx) {
    case 0:
        u.q = QT0;
        stq_user(addr, u.ll.upper);
        stq_user(addr + 8, u.ll.lower);
        break;
    case 1:
        u.q = QT0;
        stq_kernel(addr, u.ll.upper);
        stq_kernel(addr + 8, u.ll.lower);
        break;
#ifdef TARGET_SPARC64
    case 2:
        u.q = QT0;
        stq_hypv(addr, u.ll.upper);
        stq_hypv(addr + 8, u.ll.lower);
        break;
#endif
    default:
        break;
    }
#else
    u.q = QT0;
    stq_raw(address_mask(env, addr), u.ll.upper);
    stq_raw(address_mask(env, addr + 8), u.ll.lower);
#endif
}
3390
3391 static inline void set_fsr(void)
3392 {
3393 int rnd_mode;
3394
3395 switch (env->fsr & FSR_RD_MASK) {
3396 case FSR_RD_NEAREST:
3397 rnd_mode = float_round_nearest_even;
3398 break;
3399 default:
3400 case FSR_RD_ZERO:
3401 rnd_mode = float_round_to_zero;
3402 break;
3403 case FSR_RD_POS:
3404 rnd_mode = float_round_up;
3405 break;
3406 case FSR_RD_NEG:
3407 rnd_mode = float_round_down;
3408 break;
3409 }
3410 set_float_rounding_mode(rnd_mode, &env->fp_status);
3411 }
3412
/* LDFSR: replace the writable low-word FSR fields, keep the sticky ones,
 * then refresh the softfloat rounding mode. */
void helper_ldfsr(uint32_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDFSR_MASK) | (env->fsr & FSR_LDFSR_OLDMASK);
    set_fsr();
}

#ifdef TARGET_SPARC64
/* LDXFSR: 64-bit variant of LDFSR with the wider V9 field masks. */
void helper_ldxfsr(uint64_t new_fsr)
{
    env->fsr = (new_fsr & FSR_LDXFSR_MASK) | (env->fsr & FSR_LDXFSR_OLDMASK);
    set_fsr();
}
#endif

/* Leave the translated-code loop with a debug exception (breakpoint). */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
3432
3433 #ifndef TARGET_SPARC64
3434 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3435 handling ? */
/* V8 SAVE: move to the next-lower register window, trapping on window
 * overflow when the target window is marked invalid in WIM. */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_OVF);
    }
    set_cwp(cwp);
}

/* V8 RESTORE: move to the next-higher register window, trapping on window
 * underflow when the target window is marked invalid in WIM. */
void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->wim & (1 << cwp)) {
        raise_exception(TT_WIN_UNF);
    }
    set_cwp(cwp);
}
3457
/* WRPSR: write the processor state register; an out-of-range CWP field is
 * an illegal instruction. */
void helper_wrpsr(target_ulong new_psr)
{
    if ((new_psr & PSR_CWP) >= env->nwindows) {
        raise_exception(TT_ILL_INSN);
    } else {
        cpu_put_psr(env, new_psr);
    }
}

/* RDPSR: read the current processor state register. */
target_ulong helper_rdpsr(void)
{
    return get_psr();
}
3471
3472 #else
3473 /* XXX: use another pointer for %iN registers to avoid slow wrapping
3474 handling ? */
/* V9 SAVE: decrement the window and maintain the CANSAVE/CANRESTORE
 * bookkeeping; raises a spill trap (tagged WOTHER/wstate as appropriate)
 * when no window can be saved, or clean_window when none is clean. */
void helper_save(void)
{
    uint32_t cwp;

    cwp = cwp_dec(env->cwp - 1);
    if (env->cansave == 0) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    } else {
        if (env->cleanwin - env->canrestore == 0) {
            // XXX Clean windows without trap
            raise_exception(TT_CLRWIN);
        } else {
            env->cansave--;
            env->canrestore++;
            set_cwp(cwp);
        }
    }
}

/* V9 RESTORE: increment the window; raises a fill trap when no window can
 * be restored. */
void helper_restore(void)
{
    uint32_t cwp;

    cwp = cwp_inc(env->cwp + 1);
    if (env->canrestore == 0) {
        raise_exception(TT_FILL | (env->otherwin != 0 ?
                                   (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                   ((env->wstate & 0x7) << 2)));
    } else {
        env->cansave++;
        env->canrestore--;
        set_cwp(cwp);
    }
}

/* FLUSHW: force spill traps until every window except the current one has
 * been flushed to the stack (i.e. cansave == nwindows - 2). */
void helper_flushw(void)
{
    if (env->cansave != env->nwindows - 2) {
        raise_exception(TT_SPILL | (env->otherwin != 0 ?
                                    (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
                                    ((env->wstate & 0x7) << 2)));
    }
}
3520
/* SAVED: executed by a spill handler to account one saved window; the
 * window comes out of OTHERWIN if any are outstanding. */
void helper_saved(void)
{
    env->cansave++;
    if (env->otherwin == 0)
        env->canrestore--;
    else
        env->otherwin--;
}

/* RESTORED: executed by a fill handler to account one restored window and
 * grow the clean-window count. */
void helper_restored(void)
{
    env->canrestore++;
    if (env->cleanwin < env->nwindows - 1)
        env->cleanwin++;
    if (env->otherwin == 0)
        env->cansave--;
    else
        env->otherwin--;
}
3540
/* Build the V9 CCR value: xcc flags in bits 7-4, icc flags (from the V8
 * PSR layout, bits 23-20) in bits 3-0. */
static target_ulong get_ccr(void)
{
    target_ulong psr;

    psr = get_psr();

    return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
}

/* External wrapper: read CCR for an arbitrary CPU by temporarily swapping
 * the global env pointer. */
target_ulong cpu_get_ccr(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_ccr();
    env = saved_env;
    return ret;
}

/* Unpack a CCR value into the xcc/psr flag fields and switch the lazy
 * condition-code evaluator to direct-flags mode. */
static void put_ccr(target_ulong val)
{
    target_ulong tmp = val;

    env->xcc = (tmp >> 4) << 20;
    env->psr = (tmp & 0xf) << 20;
    CC_OP = CC_OP_FLAGS;
}

/* External wrapper: write CCR for an arbitrary CPU (env-swap pattern). */
void cpu_put_ccr(CPUState *env1, target_ulong val)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_ccr(val);
    env = saved_env;
}

/* V9 CWP is numbered in the opposite direction from the internal V8-style
 * cwp; convert internal -> architectural. */
static target_ulong get_cwp64(void)
{
    return env->nwindows - 1 - env->cwp;
}

/* External wrapper: read the architectural (V9) CWP for an arbitrary CPU. */
target_ulong cpu_get_cwp64(CPUState *env1)
{
    CPUState *saved_env;
    target_ulong ret;

    saved_env = env;
    env = env1;
    ret = get_cwp64();
    env = saved_env;
    return ret;
}
3597
3598 static void put_cwp64(int cwp)
3599 {
3600 if (unlikely(cwp >= env->nwindows || cwp < 0)) {
3601 cwp %= env->nwindows;
3602 }
3603 set_cwp(env->nwindows - 1 - cwp);
3604 }
3605
/* External wrapper: write the architectural (V9) CWP for an arbitrary CPU
 * by temporarily swapping the global env pointer. */
void cpu_put_cwp64(CPUState *env1, int cwp)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    put_cwp64(cwp);
    env = saved_env;
}
3615
/* RDCCR: read the condition-codes register. */
target_ulong helper_rdccr(void)
{
    return get_ccr();
}

/* WRCCR: write the condition-codes register. */
void helper_wrccr(target_ulong new_ccr)
{
    put_ccr(new_ccr);
}

// CWP handling is reversed in V9, but we still use the V8 register
// order.
/* RDCWP: read the architectural (V9-numbered) current window pointer. */
target_ulong helper_rdcwp(void)
{
    return get_cwp64();
}

/* WRCWP: write the architectural (V9-numbered) current window pointer. */
void helper_wrcwp(target_ulong new_cwp)
{
    put_cwp64(new_cwp);
}
3637
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 63 - (TO), 63 - (FROM))

/* VIS ARRAY8: convert 3-D (x, y, z) coordinates packed in pixel_addr into
 * a blocked memory offset; cubesize selects the array dimension encoding
 * per the VIS instruction-set definition of the ARRAY instructions. */
target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
    return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
        (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
        (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
        (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
        (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
        (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
        (((pixel_addr >> 55) & 1) << 4) |
        (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
        GET_FIELD_SP(pixel_addr, 11, 12);
}
3658
3659 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
3660 {
3661 uint64_t tmp;
3662
3663 tmp = addr + offset;
3664 env->gsr &= ~7ULL;
3665 env->gsr |= tmp & 7ULL;
3666 return tmp & ~7ULL;
3667 }
3668
/* POPC: population count (number of set bits) of a 64-bit value. */
target_ulong helper_popc(target_ulong val)
{
    return ctpop64(val);
}

/* Select the global-register bank matching a pstate AG/MG/IG selector.
 * Unknown combinations are logged and fall through to the normal bank. */
static inline uint64_t *get_gregset(uint32_t pstate)
{
    switch (pstate) {
    default:
        DPRINTF_PSTATE("ERROR in get_gregset: active pstate bits=%x%s%s%s\n",
                       pstate,
                       (pstate & PS_IG) ? " IG" : "",
                       (pstate & PS_MG) ? " MG" : "",
                       (pstate & PS_AG) ? " AG" : "");
        /* pass through to normal set of global registers */
    case 0:
        return env->bgregs;
    case PS_AG:
        return env->agregs;
    case PS_MG:
        return env->mgregs;
    case PS_IG:
        return env->igregs;
    }
}
3694
/* Install a new PSTATE value, swapping the active global-register bank
 * (normal/AG/MG/IG) when the AG/MG/IG selector bits (mask 0xc01) change.
 * The outgoing globals are parked in their bank and the incoming bank is
 * copied into env->gregs. */
static inline void change_pstate(uint32_t new_pstate)
{
    uint32_t pstate_regs, new_pstate_regs;
    uint64_t *src, *dst;

    if (env->def->features & CPU_FEATURE_GL) {
        // PS_AG is not implemented in this case
        new_pstate &= ~PS_AG;
    }

    pstate_regs = env->pstate & 0xc01;
    new_pstate_regs = new_pstate & 0xc01;

    if (new_pstate_regs != pstate_regs) {
        DPRINTF_PSTATE("change_pstate: switching regs old=%x new=%x\n",
                       pstate_regs, new_pstate_regs);
        // Switch global register bank
        src = get_gregset(new_pstate_regs);
        dst = get_gregset(pstate_regs);
        memcpy32(dst, env->gregs);
        memcpy32(env->gregs, src);
    }
    else {
        DPRINTF_PSTATE("change_pstate: regs new=%x (unchanged)\n",
                       new_pstate_regs);
    }
    env->pstate = new_pstate;
}
3723
/* WRPSTATE: write PSTATE (masked to the defined bits) and re-evaluate
 * pending interrupts, since IE may have been enabled. */
void helper_wrpstate(target_ulong new_state)
{
    change_pstate(new_state & 0xf3f);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

/* WRPIL: write the processor interrupt level and re-evaluate pending
 * interrupts, since a lower PIL may unmask one. */
void helper_wrpil(target_ulong new_pil)
{
#if !defined(CONFIG_USER_ONLY)
    DPRINTF_PSTATE("helper_wrpil old=%x new=%x\n",
                   env->psrpil, (uint32_t)new_pil);

    env->psrpil = new_pil;

    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
3748
/* DONE: return from trap, resuming at TNPC (skipping the trapping
 * instruction).  Restores CCR, ASI, PSTATE and CWP from TSTATE, pops the
 * trap level, then re-evaluates pending interrupts. */
void helper_done(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tnpc;
    env->npc = tsptr->tnpc + 4;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_done tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}

/* RETRY: return from trap, re-executing the trapping instruction (resume
 * at TPC/TNPC).  Otherwise identical state restoration to DONE. */
void helper_retry(void)
{
    trap_state* tsptr = cpu_tsptr(env);

    env->pc = tsptr->tpc;
    env->npc = tsptr->tnpc;
    put_ccr(tsptr->tstate >> 32);
    env->asi = (tsptr->tstate >> 24) & 0xff;
    change_pstate((tsptr->tstate >> 8) & 0xf3f);
    put_cwp64(tsptr->tstate & 0xff);
    env->tl--;

    DPRINTF_PSTATE("... helper_retry tl=%d\n", env->tl);

#if !defined(CONFIG_USER_ONLY)
    if (cpu_interrupts_enabled(env)) {
        cpu_check_irqs(env);
    }
#endif
}
3790
/* Common tail for the SOFTINT write helpers: install the new value only
 * if it changed, then re-evaluate pending interrupts. */
static void do_modify_softint(const char* operation, uint32_t value)
{
    if (env->softint != value) {
        env->softint = value;
        DPRINTF_PSTATE(": %s new %08x\n", operation, env->softint);
#if !defined(CONFIG_USER_ONLY)
        if (cpu_interrupts_enabled(env)) {
            cpu_check_irqs(env);
        }
#endif
    }
}

/* WR %set_softint: OR the given bits into SOFTINT. */
void helper_set_softint(uint64_t value)
{
    do_modify_softint("helper_set_softint", env->softint | (uint32_t)value);
}

/* WR %clear_softint: clear the given bits in SOFTINT. */
void helper_clear_softint(uint64_t value)
{
    do_modify_softint("helper_clear_softint", env->softint & (uint32_t)~value);
}

/* WR %softint: replace SOFTINT outright. */
void helper_write_softint(uint64_t value)
{
    do_modify_softint("helper_write_softint", (uint32_t)value);
}
3818 #endif
3819
/* FLUSH: invalidate any translated code covering the 8-byte-aligned
 * doubleword at addr, so self-modifying code is retranslated. */
void helper_flush(target_ulong addr)
{
    addr &= ~7;
    tb_invalidate_page_range(addr, addr + 8);
}
3825
3826 #ifdef TARGET_SPARC64
#ifdef DEBUG_PCALL
/* Human-readable names for V9 trap types 0x00-0x7f; used only by the
 * CPU_LOG_INT tracing in do_interrupt.  Missing entries read as NULL and
 * are printed as "Unknown". */
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_TMISS] = "Instruction Access MMU Miss",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_TOVF] = "Tag Overflow",
    [TT_CLRWIN] = "Clean Windows",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_DFAULT] = "Data Access Fault",
    [TT_DMISS] = "Data Access MMU Miss",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DPROT] = "Data Protection Error",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_PRIV_ACT] = "Privileged Action",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
};
#endif

/* Return the trap-state entry for the current trap level. */
trap_state* cpu_tsptr(CPUState* env)
{
    return &env->ts[env->tl & MAXTL_MASK];
}
3867
/* V9 trap entry: push a trap-state entry (TSTATE/TPC/TNPC/TT) for the new
 * trap level, select the global-register bank required by the trap type,
 * adjust the register window for CLRWIN/spill/fill traps, and vector
 * through TBR. */
void do_interrupt(CPUState *env)
{
    int intno = env->exception_index;
    trap_state* tsptr;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        if (intno < 0 || intno >= 0x180)
            name = "Unknown";
        else if (intno >= 0x100)
            name = "Trap Instruction";
        else if (intno >= 0xc0)
            name = "Window Fill";
        else if (intno >= 0x80)
            name = "Window Spill";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%04x) pc=%016" PRIx64 " npc=%016" PRIx64
                 " SP=%016" PRIx64 "\n",
                 count, name, intno,
                 env->pc,
                 env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log(" code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    /* A trap while already at MAXTL puts the CPU in error state. */
    if (env->tl >= env->maxtl) {
        cpu_abort(env, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
                  " Error state", env->exception_index, env->tl, env->maxtl);
        return;
    }
#endif
    if (env->tl < env->maxtl - 1) {
        env->tl++;
    } else {
        /* Entering the last trap level switches to RED state. */
        env->pstate |= PS_RED;
        if (env->tl < env->maxtl)
            env->tl++;
    }
    tsptr = cpu_tsptr(env);

    /* TSTATE packs CCR, ASI, PSTATE and CWP of the interrupted context. */
    tsptr->tstate = (get_ccr() << 32) |
        ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
        get_cwp64();
    tsptr->tpc = env->pc;
    tsptr->tnpc = env->npc;
    tsptr->tt = intno;

    /* Interrupt vectors use the IG bank, MMU faults the MG bank, and
       everything else the AG bank. */
    switch (intno) {
    case TT_IVEC:
        change_pstate(PS_PEF | PS_PRIV | PS_IG);
        break;
    case TT_TFAULT:
    case TT_DFAULT:
    case TT_TMISS ... TT_TMISS + 3:
    case TT_DMISS ... TT_DMISS + 3:
    case TT_DPROT ... TT_DPROT + 3:
        change_pstate(PS_PEF | PS_PRIV | PS_MG);
        break;
    default:
        change_pstate(PS_PEF | PS_PRIV | PS_AG);
        break;
    }

    if (intno == TT_CLRWIN) {
        set_cwp(cwp_dec(env->cwp - 1));
    } else if ((intno & 0x1c0) == TT_SPILL) {
        set_cwp(cwp_dec(env->cwp - env->cansave - 2));
    } else if ((intno & 0x1c0) == TT_FILL) {
        set_cwp(cwp_inc(env->cwp + 1));
    }
    /* Vector: TBR | (TL>1 ? 0x4000 : 0) | (TT << 5). */
    env->tbr &= ~0x7fffULL;
    env->tbr |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;
}
3966 #else
#ifdef DEBUG_PCALL
/* Human-readable names for V8 trap types 0x00-0x7f; used only by the
 * CPU_LOG_INT tracing in do_interrupt.  Missing entries read as NULL and
 * are printed as "Unknown".  (The duplicate [TT_TOVF] designated
 * initializer present before was a -Woverride-init warning; removed.) */
static const char * const excp_names[0x80] = {
    [TT_TFAULT] = "Instruction Access Fault",
    [TT_ILL_INSN] = "Illegal Instruction",
    [TT_PRIV_INSN] = "Privileged Instruction",
    [TT_NFPU_INSN] = "FPU Disabled",
    [TT_WIN_OVF] = "Window Overflow",
    [TT_WIN_UNF] = "Window Underflow",
    [TT_UNALIGNED] = "Unaligned Memory Access",
    [TT_FP_EXCP] = "FPU Exception",
    [TT_DFAULT] = "Data Access Fault",
    [TT_TOVF] = "Tag Overflow",
    [TT_EXTINT | 0x1] = "External Interrupt 1",
    [TT_EXTINT | 0x2] = "External Interrupt 2",
    [TT_EXTINT | 0x3] = "External Interrupt 3",
    [TT_EXTINT | 0x4] = "External Interrupt 4",
    [TT_EXTINT | 0x5] = "External Interrupt 5",
    [TT_EXTINT | 0x6] = "External Interrupt 6",
    [TT_EXTINT | 0x7] = "External Interrupt 7",
    [TT_EXTINT | 0x8] = "External Interrupt 8",
    [TT_EXTINT | 0x9] = "External Interrupt 9",
    [TT_EXTINT | 0xa] = "External Interrupt 10",
    [TT_EXTINT | 0xb] = "External Interrupt 11",
    [TT_EXTINT | 0xc] = "External Interrupt 12",
    [TT_EXTINT | 0xd] = "External Interrupt 13",
    [TT_EXTINT | 0xe] = "External Interrupt 14",
    [TT_EXTINT | 0xf] = "External Interrupt 15",
    [TT_CODE_ACCESS] = "Instruction Access Error",
    [TT_DATA_ACCESS] = "Data Access Error",
    [TT_DIV_ZERO] = "Division By Zero",
    [TT_NCP_INSN] = "Coprocessor Disabled",
};
#endif
4001
/* SPARC32 trap entry: optionally log the trap, refuse to trap while
 * traps are disabled (error state), then rotate to the previous register
 * window, save the trapped PC/NPC into the new window, enter supervisor
 * mode with traps disabled, and vector through the Trap Base Register. */
void do_interrupt(CPUState *env)
{
    int cwp, intno = env->exception_index;

#ifdef DEBUG_PCALL
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name;

        /* Trap types 0x80 and above are software traps (Ticc);
           0x00..0x7f are looked up in excp_names, NULL -> "Unknown". */
        if (intno < 0 || intno >= 0x100)
            name = "Unknown";
        else if (intno >= 0x80)
            name = "Trap Instruction";
        else {
            name = excp_names[intno];
            if (!name)
                name = "Unknown";
        }

        qemu_log("%6d: %s (v=%02x) pc=%08x npc=%08x SP=%08x\n",
                 count, name, intno,
                 env->pc,
                 env->npc, env->regwptr[6]);
        log_cpu_state(env, 0);
#if 0
        {
            int i;
            uint8_t *ptr;

            qemu_log(" code=");
            ptr = (uint8_t *)env->pc;
            for(i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));
            }
            qemu_log("\n");
        }
#endif
        count++;
    }
#endif
#if !defined(CONFIG_USER_ONLY)
    /* Taking a trap while PSR.ET is clear puts the CPU in error state. */
    if (env->psret == 0) {
        cpu_abort(env, "Trap 0x%02x while interrupts disabled, Error state",
                  env->exception_index);
        return;
    }
#endif
    env->psret = 0;                  /* disable further traps (PSR.ET = 0) */
    cwp = cwp_dec(env->cwp - 1);     /* rotate to the previous window */
    set_cwp(cwp);
    /* set_cwp() updated regwptr, so these writes land in the NEW window
       (trapped PC/NPC in %l1/%l2 per the SPARC V8 trap convention) */
    env->regwptr[9] = env->pc;
    env->regwptr[10] = env->npc;
    env->psrps = env->psrs;          /* save previous supervisor bit */
    env->psrs = 1;                   /* enter supervisor mode */
    /* vector: TBR base | trap type in bits 11..4 */
    env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
    env->pc = env->tbr;
    env->npc = env->pc + 4;
    env->exception_index = -1;       /* trap consumed */
}
4061 #endif
4062
4063 #if !defined(CONFIG_USER_ONLY)
4064
4065 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
4066 void *retaddr);
4067
4068 #define MMUSUFFIX _mmu
4069 #define ALIGNED_ONLY
4070
4071 #define SHIFT 0
4072 #include "softmmu_template.h"
4073
4074 #define SHIFT 1
4075 #include "softmmu_template.h"
4076
4077 #define SHIFT 2
4078 #include "softmmu_template.h"
4079
4080 #define SHIFT 3
4081 #include "softmmu_template.h"
4082
4083 /* XXX: make it generic ? */
4084 static void cpu_restore_state2(void *retaddr)
4085 {
4086 TranslationBlock *tb;
4087 unsigned long pc;
4088
4089 if (retaddr) {
4090 /* now we have a real cpu fault */
4091 pc = (unsigned long)retaddr;
4092 tb = tb_find_pc(pc);
4093 if (tb) {
4094 /* the PC is inside the translated code. It means that we have
4095 a virtual CPU fault */
4096 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
4097 }
4098 }
4099 }
4100
/* Called from the softmmu load/store templates (ALIGNED_ONLY) when a
   memory access is not naturally aligned: restore the CPU state to the
   faulting instruction and raise the unaligned-access trap. */
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
                                void *retaddr)
{
#ifdef DEBUG_UNALIGNED
    printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
           "\n", addr, env->pc);
#endif
    cpu_restore_state2(retaddr);
    raise_exception(TT_UNALIGNED);
}
4111
4112 /* try to fill the TLB and return an exception if error. If retaddr is
4113 NULL, it means that the function was called in C code (i.e. not
4114 from generated code or from helper.c) */
4115 /* XXX: fix it to restore all registers */
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    int ret;
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    /* Walk the MMU; a non-zero return means a fault was recorded. */
    ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        /* Roll back to the faulting instruction and longjmp out to the
           CPU loop.  Note env is deliberately NOT restored on this path:
           cpu_loop_exit() does not return here. */
        cpu_restore_state2(retaddr);
        cpu_loop_exit();
    }
    env = saved_env;
}
4133
4134 #endif /* !CONFIG_USER_ONLY */
4135
4136 #ifndef TARGET_SPARC64
4137 #if !defined(CONFIG_USER_ONLY)
/* SPARC32 handler for accesses to unassigned physical addresses: record
   the fault in the MMU fault status (mmuregs[3]) / fault address
   (mmuregs[4]) registers, and raise an instruction or data access error
   trap when the MMU is enabled and not in no-fault mode. */
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
{
    CPUState *saved_env;
    int fault_type;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
#ifdef DEBUG_UNASSIGNED
    if (is_asi)
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " asi 0x%02x from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, is_asi, env->pc);
    else
        printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
               " from " TARGET_FMT_lx "\n",
               is_exec ? "exec" : is_write ? "write" : "read", size,
               size == 1 ? "" : "s", addr, env->pc);
#endif
    /* Don't overwrite translation and access faults */
    /* Fault-type field lives in FSR bits 4..2. */
    fault_type = (env->mmuregs[3] & 0x1c) >> 2;
    if ((fault_type > 4) || (fault_type == 0)) {
        env->mmuregs[3] = 0; /* Fault status register */
        if (is_asi)
            env->mmuregs[3] |= 1 << 16;
        if (env->psrs)               /* access came from supervisor mode */
            env->mmuregs[3] |= 1 << 5;
        if (is_exec)
            env->mmuregs[3] |= 1 << 6;
        if (is_write)
            env->mmuregs[3] |= 1 << 7;
        /* fault type 5 plus the fault-valid bit (exact FSR bit meanings
           per the SuperSPARC MMU spec -- not visible here, verify) */
        env->mmuregs[3] |= (5 << 2) | 2;
        /* SuperSPARC will never place instruction fault addresses in the FAR */
        if (!is_exec) {
            env->mmuregs[4] = addr; /* Fault address register */
        }
    }
    /* overflow (same type fault was not read before another fault) */
    if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
        env->mmuregs[3] |= 1;
    }

    /* Only trap when the MMU is enabled and not in no-fault mode. */
    if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
        if (is_exec)
            raise_exception(TT_CODE_ACCESS);
        else
            raise_exception(TT_DATA_ACCESS);
    }

    /* flush neverland mappings created during no-fault mode,
       so the sequential MMU faults report proper fault types */
    if (env->mmuregs[0] & MMU_NF) {
        tlb_flush(env, 1);
    }

    env = saved_env;
}
4198 #endif
4199 #else
/* SPARC64 handler for accesses to unassigned addresses: no MMU fault
   registers to update here, just raise the appropriate access-error
   trap.  Static with a target_ulong address in user-only builds, public
   with a physical address otherwise. */
#if defined(CONFIG_USER_ONLY)
static void do_unassigned_access(target_ulong addr, int is_write, int is_exec,
                                 int is_asi, int size)
#else
void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
                          int is_asi, int size)
#endif
{
    CPUState *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
           "\n", addr, env->pc);
#endif

    if (is_exec)
        raise_exception(TT_CODE_ACCESS);
    else
        raise_exception(TT_DATA_ACCESS);

    env = saved_env;
}
4227 #endif
4228
4229
4230 #ifdef TARGET_SPARC64
/* Write the tick counter via the system-emulation tick timer (opaque is
   the timer handle forwarded to cpu_tick_set_count()); a no-op in
   user-only builds, which have no tick device. */
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}
4237
/* Read the tick counter from the system-emulation tick timer; always 0
   in user-only builds, which have no tick device. */
uint64_t helper_tick_get_count(void *opaque)
{
#if !defined(CONFIG_USER_ONLY)
    return cpu_tick_get_count(opaque);
#else
    return 0;
#endif
}
4246
/* Program the tick compare/limit value on the system-emulation tick
   timer; a no-op in user-only builds, which have no tick device. */
void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
4253 #endif