1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
7
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
13
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
20
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
27
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
34
35 #ifdef TARGET_SPARC64
36 #ifndef TARGET_ABI32
37 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
38 #else
39 #define AM_CHECK(env1) (1)
40 #endif
41 #endif
42
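/* Truncate the effective address to 32 bits when address masking applies:
   on SPARC64 this is controlled by PSTATE.AM (and is always on for the
   32-bit ABI in user emulation); on 32-bit targets this is a no-op. */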
43 static inline void address_mask(CPUState *env1, target_ulong *addr)
44 {
45 #ifdef TARGET_SPARC64
46 if (AM_CHECK(env1))
47 *addr &= 0xffffffffULL;
48 #endif
49 }
50
51 void raise_exception(int tt)
52 {
53 env->exception_index = tt;
54 cpu_loop_exit();
55 }
56
57 void helper_trap(target_ulong nb_trap)
58 {
59 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
60 cpu_loop_exit();
61 }
62
63 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
64 {
65 if (do_trap) {
66 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
67 cpu_loop_exit();
68 }
69 }
70
71 void helper_check_align(target_ulong addr, uint32_t align)
72 {
73 if (addr & align) {
74 #ifdef DEBUG_UNALIGNED
75 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
76 "\n", addr, env->pc);
77 #endif
78 raise_exception(TT_UNALIGNED);
79 }
80 }
81
82 #define F_HELPER(name, p) void helper_f##name##p(void)
83
84 #define F_BINOP(name) \
85 F_HELPER(name, s) \
86 { \
87 FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
88 } \
89 F_HELPER(name, d) \
90 { \
91 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
92 } \
93 F_HELPER(name, q) \
94 { \
95 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
96 }
97
98 F_BINOP(add);
99 F_BINOP(sub);
100 F_BINOP(mul);
101 F_BINOP(div);
102 #undef F_BINOP
103
104 void helper_fsmuld(void)
105 {
106 DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
107 float32_to_float64(FT1, &env->fp_status),
108 &env->fp_status);
109 }
110
111 void helper_fdmulq(void)
112 {
113 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
114 float64_to_float128(DT1, &env->fp_status),
115 &env->fp_status);
116 }
117
118 F_HELPER(neg, s)
119 {
120 FT0 = float32_chs(FT1);
121 }
122
123 #ifdef TARGET_SPARC64
124 F_HELPER(neg, d)
125 {
126 DT0 = float64_chs(DT1);
127 }
128
129 F_HELPER(neg, q)
130 {
131 QT0 = float128_chs(QT1);
132 }
133 #endif
134
135 /* Integer to float conversion. */
136 F_HELPER(ito, s)
137 {
138 FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
139 }
140
141 F_HELPER(ito, d)
142 {
143 DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
144 }
145
146 F_HELPER(ito, q)
147 {
148 QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
149 }
150
151 #ifdef TARGET_SPARC64
152 F_HELPER(xto, s)
153 {
154 FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
155 }
156
157 F_HELPER(xto, d)
158 {
159 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
160 }
161
162 F_HELPER(xto, q)
163 {
164 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
165 }
166 #endif
167 #undef F_HELPER
168
169 /* floating point conversion */
170 void helper_fdtos(void)
171 {
172 FT0 = float64_to_float32(DT1, &env->fp_status);
173 }
174
175 void helper_fstod(void)
176 {
177 DT0 = float32_to_float64(FT1, &env->fp_status);
178 }
179
180 void helper_fqtos(void)
181 {
182 FT0 = float128_to_float32(QT1, &env->fp_status);
183 }
184
185 void helper_fstoq(void)
186 {
187 QT0 = float32_to_float128(FT1, &env->fp_status);
188 }
189
190 void helper_fqtod(void)
191 {
192 DT0 = float128_to_float64(QT1, &env->fp_status);
193 }
194
195 void helper_fdtoq(void)
196 {
197 QT0 = float64_to_float128(DT1, &env->fp_status);
198 }
199
200 /* Float to integer conversion. */
201 void helper_fstoi(void)
202 {
203 *((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
204 }
205
206 void helper_fdtoi(void)
207 {
208 *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
209 }
210
211 void helper_fqtoi(void)
212 {
213 *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
214 }
215
216 #ifdef TARGET_SPARC64
217 void helper_fstox(void)
218 {
219 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
220 }
221
222 void helper_fdtox(void)
223 {
224 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
225 }
226
227 void helper_fqtox(void)
228 {
229 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
230 }
231
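/* FALIGNDATA: concatenate DT0:DT1 and extract the 8 bytes starting at the
   byte offset held in the low three bits of GSR (normally set up earlier
   by an alignaddr instruction). */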
232 void helper_faligndata(void)
233 {
234 uint64_t tmp;
235
236 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
237 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
238 *((uint64_t *)&DT0) = tmp;
239 }
240
241 void helper_movl_FT0_0(void)
242 {
243 *((uint32_t *)&FT0) = 0;
244 }
245
246 void helper_movl_DT0_0(void)
247 {
248 *((uint64_t *)&DT0) = 0;
249 }
250
251 void helper_movl_FT0_1(void)
252 {
253 *((uint32_t *)&FT0) = 0xffffffff;
254 }
255
256 void helper_movl_DT0_1(void)
257 {
258 *((uint64_t *)&DT0) = 0xffffffffffffffffULL;
259 }
260
261 void helper_fnot(void)
262 {
263 *(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
264 }
265
266 void helper_fnots(void)
267 {
268 *(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
269 }
270
271 void helper_fnor(void)
272 {
273 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
274 }
275
276 void helper_fnors(void)
277 {
278 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
279 }
280
281 void helper_for(void)
282 {
283 *(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
284 }
285
286 void helper_fors(void)
287 {
288 *(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
289 }
290
291 void helper_fxor(void)
292 {
293 *(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
294 }
295
296 void helper_fxors(void)
297 {
298 *(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
299 }
300
301 void helper_fand(void)
302 {
303 *(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
304 }
305
306 void helper_fands(void)
307 {
308 *(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
309 }
310
311 void helper_fornot(void)
312 {
313 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
314 }
315
316 void helper_fornots(void)
317 {
318 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
319 }
320
321 void helper_fandnot(void)
322 {
323 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
324 }
325
326 void helper_fandnots(void)
327 {
328 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
329 }
330
331 void helper_fnand(void)
332 {
333 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
334 }
335
336 void helper_fnands(void)
337 {
338 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
339 }
340
341 void helper_fxnor(void)
342 {
343 *(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
344 }
345
346 void helper_fxnors(void)
347 {
348 *(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
349 }
350
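/* The VIS_* accessors index sub-words of a 64-bit or 32-bit value so that
   element 0 is always the least significant one, independent of host
   byte order. */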
351 #ifdef WORDS_BIGENDIAN
352 #define VIS_B64(n) b[7 - (n)]
353 #define VIS_W64(n) w[3 - (n)]
354 #define VIS_SW64(n) sw[3 - (n)]
355 #define VIS_L64(n) l[1 - (n)]
356 #define VIS_B32(n) b[3 - (n)]
357 #define VIS_W32(n) w[1 - (n)]
358 #else
359 #define VIS_B64(n) b[n]
360 #define VIS_W64(n) w[n]
361 #define VIS_SW64(n) sw[n]
362 #define VIS_L64(n) l[n]
363 #define VIS_B32(n) b[n]
364 #define VIS_W32(n) w[n]
365 #endif
366
367 typedef union {
368 uint8_t b[8];
369 uint16_t w[4];
370 int16_t sw[4];
371 uint32_t l[2];
372 float64 d;
373 } vis64;
374
375 typedef union {
376 uint8_t b[4];
377 uint16_t w[2];
378 uint32_t l;
379 float32 f;
380 } vis32;
381
382 void helper_fpmerge(void)
383 {
384 vis64 s, d;
385
386 s.d = DT0;
387 d.d = DT1;
388
389 // Reverse calculation order to handle overlap
390 d.VIS_B64(7) = s.VIS_B64(3);
391 d.VIS_B64(6) = d.VIS_B64(3);
392 d.VIS_B64(5) = s.VIS_B64(2);
393 d.VIS_B64(4) = d.VIS_B64(2);
394 d.VIS_B64(3) = s.VIS_B64(1);
395 d.VIS_B64(2) = d.VIS_B64(1);
396 d.VIS_B64(1) = s.VIS_B64(0);
397 //d.VIS_B64(0) = d.VIS_B64(0);
398
399 DT0 = d.d;
400 }
401
402 void helper_fmul8x16(void)
403 {
404 vis64 s, d;
405 uint32_t tmp;
406
407 s.d = DT0;
408 d.d = DT1;
409
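/* Each partial product multiplies a signed 16-bit element by an unsigned
   8-bit element; the low byte of the 24-bit product is rounded to nearest
   before the result is truncated back to 16 bits. */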
410 #define PMUL(r) \
411 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
412 if ((tmp & 0xff) > 0x7f) \
413 tmp += 0x100; \
414 d.VIS_W64(r) = tmp >> 8;
415
416 PMUL(0);
417 PMUL(1);
418 PMUL(2);
419 PMUL(3);
420 #undef PMUL
421
422 DT0 = d.d;
423 }
424
425 void helper_fmul8x16al(void)
426 {
427 vis64 s, d;
428 uint32_t tmp;
429
430 s.d = DT0;
431 d.d = DT1;
432
433 #define PMUL(r) \
434 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
435 if ((tmp & 0xff) > 0x7f) \
436 tmp += 0x100; \
437 d.VIS_W64(r) = tmp >> 8;
438
439 PMUL(0);
440 PMUL(1);
441 PMUL(2);
442 PMUL(3);
443 #undef PMUL
444
445 DT0 = d.d;
446 }
447
448 void helper_fmul8x16au(void)
449 {
450 vis64 s, d;
451 uint32_t tmp;
452
453 s.d = DT0;
454 d.d = DT1;
455
456 #define PMUL(r) \
457 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
458 if ((tmp & 0xff) > 0x7f) \
459 tmp += 0x100; \
460 d.VIS_W64(r) = tmp >> 8;
461
462 PMUL(0);
463 PMUL(1);
464 PMUL(2);
465 PMUL(3);
466 #undef PMUL
467
468 DT0 = d.d;
469 }
470
471 void helper_fmul8sux16(void)
472 {
473 vis64 s, d;
474 uint32_t tmp;
475
476 s.d = DT0;
477 d.d = DT1;
478
479 #define PMUL(r) \
480 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
481 if ((tmp & 0xff) > 0x7f) \
482 tmp += 0x100; \
483 d.VIS_W64(r) = tmp >> 8;
484
485 PMUL(0);
486 PMUL(1);
487 PMUL(2);
488 PMUL(3);
489 #undef PMUL
490
491 DT0 = d.d;
492 }
493
494 void helper_fmul8ulx16(void)
495 {
496 vis64 s, d;
497 uint32_t tmp;
498
499 s.d = DT0;
500 d.d = DT1;
501
502 #define PMUL(r) \
503 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
504 if ((tmp & 0xff) > 0x7f) \
505 tmp += 0x100; \
506 d.VIS_W64(r) = tmp >> 8;
507
508 PMUL(0);
509 PMUL(1);
510 PMUL(2);
511 PMUL(3);
512 #undef PMUL
513
514 DT0 = d.d;
515 }
516
517 void helper_fmuld8sux16(void)
518 {
519 vis64 s, d;
520 uint32_t tmp;
521
522 s.d = DT0;
523 d.d = DT1;
524
525 #define PMUL(r) \
526 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
527 if ((tmp & 0xff) > 0x7f) \
528 tmp += 0x100; \
529 d.VIS_L64(r) = tmp;
530
531 // Reverse calculation order to handle overlap
532 PMUL(1);
533 PMUL(0);
534 #undef PMUL
535
536 DT0 = d.d;
537 }
538
539 void helper_fmuld8ulx16(void)
540 {
541 vis64 s, d;
542 uint32_t tmp;
543
544 s.d = DT0;
545 d.d = DT1;
546
547 #define PMUL(r) \
548 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
549 if ((tmp & 0xff) > 0x7f) \
550 tmp += 0x100; \
551 d.VIS_L64(r) = tmp;
552
553 // Reverse calculation order to handle overlap
554 PMUL(1);
555 PMUL(0);
556 #undef PMUL
557
558 DT0 = d.d;
559 }
560
561 void helper_fexpand(void)
562 {
563 vis32 s;
564 vis64 d;
565
566 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
567 d.d = DT1;
568 d.VIS_W64(0) = s.VIS_B32(0) << 4;
569 d.VIS_W64(1) = s.VIS_B32(1) << 4;
570 d.VIS_W64(2) = s.VIS_B32(2) << 4;
571 d.VIS_W64(3) = s.VIS_B32(3) << 4;
572
573 DT0 = d.d;
574 }
575
576 #define VIS_HELPER(name, F) \
577 void name##16(void) \
578 { \
579 vis64 s, d; \
580 \
581 s.d = DT0; \
582 d.d = DT1; \
583 \
584 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
585 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
586 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
587 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
588 \
589 DT0 = d.d; \
590 } \
591 \
592 void name##16s(void) \
593 { \
594 vis32 s, d; \
595 \
596 s.f = FT0; \
597 d.f = FT1; \
598 \
599 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
600 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
601 \
602 FT0 = d.f; \
603 } \
604 \
605 void name##32(void) \
606 { \
607 vis64 s, d; \
608 \
609 s.d = DT0; \
610 d.d = DT1; \
611 \
612 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
613 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
614 \
615 DT0 = d.d; \
616 } \
617 \
618 void name##32s(void) \
619 { \
620 vis32 s, d; \
621 \
622 s.f = FT0; \
623 d.f = FT1; \
624 \
625 d.l = F(d.l, s.l); \
626 \
627 FT0 = d.f; \
628 }
629
630 #define FADD(a, b) ((a) + (b))
631 #define FSUB(a, b) ((a) - (b))
632 VIS_HELPER(helper_fpadd, FADD)
633 VIS_HELPER(helper_fpsub, FSUB)
634
635 #define VIS_CMPHELPER(name, F) \
636 void name##16(void) \
637 { \
638 vis64 s, d; \
639 \
640 s.d = DT0; \
641 d.d = DT1; \
642 \
643 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
644 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
645 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
646 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
647 \
648 DT0 = d.d; \
649 } \
650 \
651 void name##32(void) \
652 { \
653 vis64 s, d; \
654 \
655 s.d = DT0; \
656 d.d = DT1; \
657 \
658 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
659 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
660 \
661 DT0 = d.d; \
662 }
663
664 #define FCMPGT(a, b) ((a) > (b))
665 #define FCMPEQ(a, b) ((a) == (b))
666 #define FCMPLE(a, b) ((a) <= (b))
667 #define FCMPNE(a, b) ((a) != (b))
668
669 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
670 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
671 VIS_CMPHELPER(helper_fcmple, FCMPLE)
672 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
673 #endif
674
675 void helper_check_ieee_exceptions(void)
676 {
677 target_ulong status;
678
679 status = get_float_exception_flags(&env->fp_status);
680 if (status) {
681 /* Copy IEEE 754 flags into FSR */
682 if (status & float_flag_invalid)
683 env->fsr |= FSR_NVC;
684 if (status & float_flag_overflow)
685 env->fsr |= FSR_OFC;
686 if (status & float_flag_underflow)
687 env->fsr |= FSR_UFC;
688 if (status & float_flag_divbyzero)
689 env->fsr |= FSR_DZC;
690 if (status & float_flag_inexact)
691 env->fsr |= FSR_NXC;
692
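/* FSR.cexc occupies bits 4..0 and FSR.TEM bits 27..23, so shifting the
   trap-enable mask right by 23 lines it up with the current exceptions;
   the accrued-exception field (aexc) sits 5 bits above cexc, hence the
   << 5 accumulation below. */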
693 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
694 /* Unmasked exception, generate a trap */
695 env->fsr |= FSR_FTT_IEEE_EXCP;
696 raise_exception(TT_FP_EXCP);
697 } else {
698 /* Accumulate exceptions */
699 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
700 }
701 }
702 }
703
704 void helper_clear_float_exceptions(void)
705 {
706 set_float_exception_flags(0, &env->fp_status);
707 }
708
709 void helper_fabss(void)
710 {
711 FT0 = float32_abs(FT1);
712 }
713
714 #ifdef TARGET_SPARC64
715 void helper_fabsd(void)
716 {
717 DT0 = float64_abs(DT1);
718 }
719
720 void helper_fabsq(void)
721 {
722 QT0 = float128_abs(QT1);
723 }
724 #endif
725
726 void helper_fsqrts(void)
727 {
728 FT0 = float32_sqrt(FT1, &env->fp_status);
729 }
730
731 void helper_fsqrtd(void)
732 {
733 DT0 = float64_sqrt(DT1, &env->fp_status);
734 }
735
736 void helper_fsqrtq(void)
737 {
738 QT0 = float128_sqrt(QT1, &env->fp_status);
739 }
740
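/* Instantiate the fcmp*/fcmpe* helpers: the softfloat comparison result
   selects the FCC condition codes placed at bit offset FS in the FSR.
   Unordered operands raise an IEEE trap when TRAP is set (the fcmpe*
   variants) or when the invalid-operation trap is unmasked (FSR_NVM);
   otherwise the invalid condition is only accrued in FSR_NVA. */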
741 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
742 void glue(helper_, name) (void) \
743 { \
744 target_ulong new_fsr; \
745 \
746 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
747 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
748 case float_relation_unordered: \
749 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
750 if ((env->fsr & FSR_NVM) || TRAP) { \
751 env->fsr |= new_fsr; \
752 env->fsr |= FSR_NVC; \
753 env->fsr |= FSR_FTT_IEEE_EXCP; \
754 raise_exception(TT_FP_EXCP); \
755 } else { \
756 env->fsr |= FSR_NVA; \
757 } \
758 break; \
759 case float_relation_less: \
760 new_fsr = FSR_FCC0 << FS; \
761 break; \
762 case float_relation_greater: \
763 new_fsr = FSR_FCC1 << FS; \
764 break; \
765 default: \
766 new_fsr = 0; \
767 break; \
768 } \
769 env->fsr |= new_fsr; \
770 }
771
772 GEN_FCMP(fcmps, float32, FT0, FT1, 0, 0);
773 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
774
775 GEN_FCMP(fcmpes, float32, FT0, FT1, 0, 1);
776 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
777
778 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
779 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
780
781 #ifdef TARGET_SPARC64
782 GEN_FCMP(fcmps_fcc1, float32, FT0, FT1, 22, 0);
783 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
784 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
785
786 GEN_FCMP(fcmps_fcc2, float32, FT0, FT1, 24, 0);
787 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
788 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
789
790 GEN_FCMP(fcmps_fcc3, float32, FT0, FT1, 26, 0);
791 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
792 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
793
794 GEN_FCMP(fcmpes_fcc1, float32, FT0, FT1, 22, 1);
795 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
796 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
797
798 GEN_FCMP(fcmpes_fcc2, float32, FT0, FT1, 24, 1);
799 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
800 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
801
802 GEN_FCMP(fcmpes_fcc3, float32, FT0, FT1, 26, 1);
803 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
804 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
805 #endif
806
807 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
808 defined(DEBUG_MXCC)
809 static void dump_mxcc(CPUState *env)
810 {
811 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
812 env->mxccdata[0], env->mxccdata[1],
813 env->mxccdata[2], env->mxccdata[3]);
814 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
815 " %016llx %016llx %016llx %016llx\n",
816 env->mxccregs[0], env->mxccregs[1],
817 env->mxccregs[2], env->mxccregs[3],
818 env->mxccregs[4], env->mxccregs[5],
819 env->mxccregs[6], env->mxccregs[7]);
820 }
821 #endif
822
823 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
824 && defined(DEBUG_ASI)
825 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
826 uint64_t r1)
827 {
828 switch (size)
829 {
830 case 1:
831 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
832 addr, asi, r1 & 0xff);
833 break;
834 case 2:
835 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
836 addr, asi, r1 & 0xffff);
837 break;
838 case 4:
839 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
840 addr, asi, r1 & 0xffffffff);
841 break;
842 case 8:
843 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
844 addr, asi, r1);
845 break;
846 }
847 }
848 #endif
849
850 #ifndef TARGET_SPARC64
851 #ifndef CONFIG_USER_ONLY
852 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
853 {
854 uint64_t ret = 0;
855 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
856 uint32_t last_addr = addr;
857 #endif
858
859 helper_check_align(addr, size - 1);
860 switch (asi) {
861 case 2: /* SuperSparc MXCC registers */
862 switch (addr) {
863 case 0x01c00a00: /* MXCC control register */
864 if (size == 8)
865 ret = env->mxccregs[3];
866 else
867 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
868 size);
869 break;
870 case 0x01c00a04: /* MXCC control register */
871 if (size == 4)
872 ret = env->mxccregs[3];
873 else
874 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
875 size);
876 break;
877 case 0x01c00c00: /* Module reset register */
878 if (size == 8) {
879 ret = env->mxccregs[5];
880 // should we do something here?
881 } else
882 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
883 size);
884 break;
885 case 0x01c00f00: /* MBus port address register */
886 if (size == 8)
887 ret = env->mxccregs[7];
888 else
889 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
890 size);
891 break;
892 default:
893 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
894 size);
895 break;
896 }
897 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
898 "addr = %08x -> ret = %08x,"
899 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
900 #ifdef DEBUG_MXCC
901 dump_mxcc(env);
902 #endif
903 break;
904 case 3: /* MMU probe */
905 {
906 int mmulev;
907
908 mmulev = (addr >> 8) & 15;
909 if (mmulev > 4)
910 ret = 0;
911 else
912 ret = mmu_probe(env, addr, mmulev);
913 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
914 addr, mmulev, ret);
915 }
916 break;
917 case 4: /* read MMU regs */
918 {
919 int reg = (addr >> 8) & 0x1f;
920
921 ret = env->mmuregs[reg];
922 if (reg == 3) /* Fault status cleared on read */
923 env->mmuregs[3] = 0;
924 else if (reg == 0x13) /* Fault status read */
925 ret = env->mmuregs[3];
926 else if (reg == 0x14) /* Fault address read */
927 ret = env->mmuregs[4];
928 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
929 }
930 break;
931 case 5: // Turbosparc ITLB Diagnostic
932 case 6: // Turbosparc DTLB Diagnostic
933 case 7: // Turbosparc IOTLB Diagnostic
934 break;
935 case 9: /* Supervisor code access */
936 switch(size) {
937 case 1:
938 ret = ldub_code(addr);
939 break;
940 case 2:
941 ret = lduw_code(addr);
942 break;
943 default:
944 case 4:
945 ret = ldl_code(addr);
946 break;
947 case 8:
948 ret = ldq_code(addr);
949 break;
950 }
951 break;
952 case 0xa: /* User data access */
953 switch(size) {
954 case 1:
955 ret = ldub_user(addr);
956 break;
957 case 2:
958 ret = lduw_user(addr);
959 break;
960 default:
961 case 4:
962 ret = ldl_user(addr);
963 break;
964 case 8:
965 ret = ldq_user(addr);
966 break;
967 }
968 break;
969 case 0xb: /* Supervisor data access */
970 switch(size) {
971 case 1:
972 ret = ldub_kernel(addr);
973 break;
974 case 2:
975 ret = lduw_kernel(addr);
976 break;
977 default:
978 case 4:
979 ret = ldl_kernel(addr);
980 break;
981 case 8:
982 ret = ldq_kernel(addr);
983 break;
984 }
985 break;
986 case 0xc: /* I-cache tag */
987 case 0xd: /* I-cache data */
988 case 0xe: /* D-cache tag */
989 case 0xf: /* D-cache data */
990 break;
991 case 0x20: /* MMU passthrough */
992 switch(size) {
993 case 1:
994 ret = ldub_phys(addr);
995 break;
996 case 2:
997 ret = lduw_phys(addr);
998 break;
999 default:
1000 case 4:
1001 ret = ldl_phys(addr);
1002 break;
1003 case 8:
1004 ret = ldq_phys(addr);
1005 break;
1006 }
1007 break;
1008 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1009 switch(size) {
1010 case 1:
1011 ret = ldub_phys((target_phys_addr_t)addr
1012 | ((target_phys_addr_t)(asi & 0xf) << 32));
1013 break;
1014 case 2:
1015 ret = lduw_phys((target_phys_addr_t)addr
1016 | ((target_phys_addr_t)(asi & 0xf) << 32));
1017 break;
1018 default:
1019 case 4:
1020 ret = ldl_phys((target_phys_addr_t)addr
1021 | ((target_phys_addr_t)(asi & 0xf) << 32));
1022 break;
1023 case 8:
1024 ret = ldq_phys((target_phys_addr_t)addr
1025 | ((target_phys_addr_t)(asi & 0xf) << 32));
1026 break;
1027 }
1028 break;
1029 case 0x30: // Turbosparc secondary cache diagnostic
1030 case 0x31: // Turbosparc RAM snoop
1031 case 0x32: // Turbosparc page table descriptor diagnostic
1032 case 0x39: /* data cache diagnostic register */
1033 ret = 0;
1034 break;
1035 case 8: /* User code access, XXX */
1036 default:
1037 do_unassigned_access(addr, 0, 0, asi);
1038 ret = 0;
1039 break;
1040 }
1041 if (sign) {
1042 switch(size) {
1043 case 1:
1044 ret = (int8_t) ret;
1045 break;
1046 case 2:
1047 ret = (int16_t) ret;
1048 break;
1049 case 4:
1050 ret = (int32_t) ret;
1051 break;
1052 default:
1053 break;
1054 }
1055 }
1056 #ifdef DEBUG_ASI
1057 dump_asi("read ", last_addr, asi, size, ret);
1058 #endif
1059 return ret;
1060 }
1061
1062 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1063 {
1064 helper_check_align(addr, size - 1);
1065 switch(asi) {
1066 case 2: /* SuperSparc MXCC registers */
1067 switch (addr) {
1068 case 0x01c00000: /* MXCC stream data register 0 */
1069 if (size == 8)
1070 env->mxccdata[0] = val;
1071 else
1072 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1073 size);
1074 break;
1075 case 0x01c00008: /* MXCC stream data register 1 */
1076 if (size == 8)
1077 env->mxccdata[1] = val;
1078 else
1079 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1080 size);
1081 break;
1082 case 0x01c00010: /* MXCC stream data register 2 */
1083 if (size == 8)
1084 env->mxccdata[2] = val;
1085 else
1086 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1087 size);
1088 break;
1089 case 0x01c00018: /* MXCC stream data register 3 */
1090 if (size == 8)
1091 env->mxccdata[3] = val;
1092 else
1093 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1094 size);
1095 break;
1096 case 0x01c00100: /* MXCC stream source */
1097 if (size == 8)
1098 env->mxccregs[0] = val;
1099 else
1100 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1101 size);
1102 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1103 0);
1104 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1105 8);
1106 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1107 16);
1108 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1109 24);
1110 break;
1111 case 0x01c00200: /* MXCC stream destination */
1112 if (size == 8)
1113 env->mxccregs[1] = val;
1114 else
1115 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1116 size);
1117 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1118 env->mxccdata[0]);
1119 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1120 env->mxccdata[1]);
1121 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1122 env->mxccdata[2]);
1123 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1124 env->mxccdata[3]);
1125 break;
1126 case 0x01c00a00: /* MXCC control register */
1127 if (size == 8)
1128 env->mxccregs[3] = val;
1129 else
1130 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1131 size);
1132 break;
1133 case 0x01c00a04: /* MXCC control register */
1134 if (size == 4)
1135 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1136 | val;
1137 else
1138 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1139 size);
1140 break;
1141 case 0x01c00e00: /* MXCC error register */
1142 // writing a 1 bit clears the error
1143 if (size == 8)
1144 env->mxccregs[6] &= ~val;
1145 else
1146 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1147 size);
1148 break;
1149 case 0x01c00f00: /* MBus port address register */
1150 if (size == 8)
1151 env->mxccregs[7] = val;
1152 else
1153 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1154 size);
1155 break;
1156 default:
1157 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1158 size);
1159 break;
1160 }
1161 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08x\n", asi,
1162 size, addr, val);
1163 #ifdef DEBUG_MXCC
1164 dump_mxcc(env);
1165 #endif
1166 break;
1167 case 3: /* MMU flush */
1168 {
1169 int mmulev;
1170
1171 mmulev = (addr >> 8) & 15;
1172 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1173 switch (mmulev) {
1174 case 0: // flush page
1175 tlb_flush_page(env, addr & 0xfffff000);
1176 break;
1177 case 1: // flush segment (256k)
1178 case 2: // flush region (16M)
1179 case 3: // flush context (4G)
1180 case 4: // flush entire
1181 tlb_flush(env, 1);
1182 break;
1183 default:
1184 break;
1185 }
1186 #ifdef DEBUG_MMU
1187 dump_mmu(env);
1188 #endif
1189 }
1190 break;
1191 case 4: /* write MMU regs */
1192 {
1193 int reg = (addr >> 8) & 0x1f;
1194 uint32_t oldreg;
1195
1196 oldreg = env->mmuregs[reg];
1197 switch(reg) {
1198 case 0: // Control Register
1199 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1200 (val & 0x00ffffff);
1201 // Mappings generated during no-fault mode or MMU
1202 // disabled mode are invalid in normal mode
1203 if ((oldreg & (MMU_E | MMU_NF | env->mmu_bm)) !=
1204 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->mmu_bm)))
1205 tlb_flush(env, 1);
1206 break;
1207 case 1: // Context Table Pointer Register
1208 env->mmuregs[reg] = val & env->mmu_ctpr_mask;
1209 break;
1210 case 2: // Context Register
1211 env->mmuregs[reg] = val & env->mmu_cxr_mask;
1212 if (oldreg != env->mmuregs[reg]) {
1213 /* we flush when the MMU context changes because
1214 QEMU has no MMU context support */
1215 tlb_flush(env, 1);
1216 }
1217 break;
1218 case 3: // Synchronous Fault Status Register with Clear
1219 case 4: // Synchronous Fault Address Register
1220 break;
1221 case 0x10: // TLB Replacement Control Register
1222 env->mmuregs[reg] = val & env->mmu_trcr_mask;
1223 break;
1224 case 0x13: // Synchronous Fault Status Register with Read and Clear
1225 env->mmuregs[3] = val & env->mmu_sfsr_mask;
1226 break;
1227 case 0x14: // Synchronous Fault Address Register
1228 env->mmuregs[4] = val;
1229 break;
1230 default:
1231 env->mmuregs[reg] = val;
1232 break;
1233 }
1234 if (oldreg != env->mmuregs[reg]) {
1235 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1236 reg, oldreg, env->mmuregs[reg]);
1237 }
1238 #ifdef DEBUG_MMU
1239 dump_mmu(env);
1240 #endif
1241 }
1242 break;
1243 case 5: // Turbosparc ITLB Diagnostic
1244 case 6: // Turbosparc DTLB Diagnostic
1245 case 7: // Turbosparc IOTLB Diagnostic
1246 break;
1247 case 0xa: /* User data access */
1248 switch(size) {
1249 case 1:
1250 stb_user(addr, val);
1251 break;
1252 case 2:
1253 stw_user(addr, val);
1254 break;
1255 default:
1256 case 4:
1257 stl_user(addr, val);
1258 break;
1259 case 8:
1260 stq_user(addr, val);
1261 break;
1262 }
1263 break;
1264 case 0xb: /* Supervisor data access */
1265 switch(size) {
1266 case 1:
1267 stb_kernel(addr, val);
1268 break;
1269 case 2:
1270 stw_kernel(addr, val);
1271 break;
1272 default:
1273 case 4:
1274 stl_kernel(addr, val);
1275 break;
1276 case 8:
1277 stq_kernel(addr, val);
1278 break;
1279 }
1280 break;
1281 case 0xc: /* I-cache tag */
1282 case 0xd: /* I-cache data */
1283 case 0xe: /* D-cache tag */
1284 case 0xf: /* D-cache data */
1285 case 0x10: /* I/D-cache flush page */
1286 case 0x11: /* I/D-cache flush segment */
1287 case 0x12: /* I/D-cache flush region */
1288 case 0x13: /* I/D-cache flush context */
1289 case 0x14: /* I/D-cache flush user */
1290 break;
1291 case 0x17: /* Block copy, sta access */
1292 {
1293 // val = src
1294 // addr = dst
1295 // copy 32 bytes
1296 unsigned int i;
1297 uint32_t src = val & ~3, dst = addr & ~3, temp;
1298
1299 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1300 temp = ldl_kernel(src);
1301 stl_kernel(dst, temp);
1302 }
1303 }
1304 break;
1305 case 0x1f: /* Block fill, stda access */
1306 {
1307 // addr = dst
1308 // fill 32 bytes with val
1309 unsigned int i;
1310 uint32_t dst = addr & ~7;
1311
1312 for (i = 0; i < 32; i += 8, dst += 8)
1313 stq_kernel(dst, val);
1314 }
1315 break;
1316 case 0x20: /* MMU passthrough */
1317 {
1318 switch(size) {
1319 case 1:
1320 stb_phys(addr, val);
1321 break;
1322 case 2:
1323 stw_phys(addr, val);
1324 break;
1325 case 4:
1326 default:
1327 stl_phys(addr, val);
1328 break;
1329 case 8:
1330 stq_phys(addr, val);
1331 break;
1332 }
1333 }
1334 break;
1335 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1336 {
1337 switch(size) {
1338 case 1:
1339 stb_phys((target_phys_addr_t)addr
1340 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1341 break;
1342 case 2:
1343 stw_phys((target_phys_addr_t)addr
1344 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1345 break;
1346 case 4:
1347 default:
1348 stl_phys((target_phys_addr_t)addr
1349 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1350 break;
1351 case 8:
1352 stq_phys((target_phys_addr_t)addr
1353 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1354 break;
1355 }
1356 }
1357 break;
1358 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1359 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1360 // Turbosparc snoop RAM
1361 case 0x32: // store buffer control or Turbosparc page table
1362 // descriptor diagnostic
1363 case 0x36: /* I-cache flash clear */
1364 case 0x37: /* D-cache flash clear */
1365 case 0x38: /* breakpoint diagnostics */
1366 case 0x4c: /* breakpoint action */
1367 break;
1368 case 8: /* User code access, XXX */
1369 case 9: /* Supervisor code access, XXX */
1370 default:
1371 do_unassigned_access(addr, 1, 0, asi);
1372 break;
1373 }
1374 #ifdef DEBUG_ASI
1375 dump_asi("write", addr, asi, size, val);
1376 #endif
1377 }
1378
1379 #endif /* CONFIG_USER_ONLY */
1380 #else /* TARGET_SPARC64 */
1381
1382 #ifdef CONFIG_USER_ONLY
1383 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1384 {
1385 uint64_t ret = 0;
1386 #if defined(DEBUG_ASI)
1387 target_ulong last_addr = addr;
1388 #endif
1389
1390 if (asi < 0x80)
1391 raise_exception(TT_PRIV_ACT);
1392
1393 helper_check_align(addr, size - 1);
1394 address_mask(env, &addr);
1395
1396 switch (asi) {
1397 case 0x80: // Primary
1398 case 0x82: // Primary no-fault
1399 case 0x88: // Primary LE
1400 case 0x8a: // Primary no-fault LE
1401 {
1402 switch(size) {
1403 case 1:
1404 ret = ldub_raw(addr);
1405 break;
1406 case 2:
1407 ret = lduw_raw(addr);
1408 break;
1409 case 4:
1410 ret = ldl_raw(addr);
1411 break;
1412 default:
1413 case 8:
1414 ret = ldq_raw(addr);
1415 break;
1416 }
1417 }
1418 break;
1419 case 0x81: // Secondary
1420 case 0x83: // Secondary no-fault
1421 case 0x89: // Secondary LE
1422 case 0x8b: // Secondary no-fault LE
1423 // XXX
1424 break;
1425 default:
1426 break;
1427 }
1428
1429 /* Convert from little endian */
1430 switch (asi) {
1431 case 0x88: // Primary LE
1432 case 0x89: // Secondary LE
1433 case 0x8a: // Primary no-fault LE
1434 case 0x8b: // Secondary no-fault LE
1435 switch(size) {
1436 case 2:
1437 ret = bswap16(ret);
1438 break;
1439 case 4:
1440 ret = bswap32(ret);
1441 break;
1442 case 8:
1443 ret = bswap64(ret);
1444 break;
1445 default:
1446 break;
1447 }
1448 default:
1449 break;
1450 }
1451
1452 /* Convert to signed number */
1453 if (sign) {
1454 switch(size) {
1455 case 1:
1456 ret = (int8_t) ret;
1457 break;
1458 case 2:
1459 ret = (int16_t) ret;
1460 break;
1461 case 4:
1462 ret = (int32_t) ret;
1463 break;
1464 default:
1465 break;
1466 }
1467 }
1468 #ifdef DEBUG_ASI
1469 dump_asi("read ", last_addr, asi, size, ret);
1470 #endif
1471 return ret;
1472 }
1473
1474 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1475 {
1476 #ifdef DEBUG_ASI
1477 dump_asi("write", addr, asi, size, val);
1478 #endif
1479 if (asi < 0x80)
1480 raise_exception(TT_PRIV_ACT);
1481
1482 helper_check_align(addr, size - 1);
1483 address_mask(env, &addr);
1484
1485 /* Convert to little endian */
1486 switch (asi) {
1487 case 0x88: // Primary LE
1488 case 0x89: // Secondary LE
1489 switch(size) {
1490 case 2:
1491 val = bswap16(val);
1492 break;
1493 case 4:
1494 val = bswap32(val);
1495 break;
1496 case 8:
1497 val = bswap64(val);
1498 break;
1499 default:
1500 break;
1501 }
1502 default:
1503 break;
1504 }
1505
1506 switch(asi) {
1507 case 0x80: // Primary
1508 case 0x88: // Primary LE
1509 {
1510 switch(size) {
1511 case 1:
1512 stb_raw(addr, val);
1513 break;
1514 case 2:
1515 stw_raw(addr, val);
1516 break;
1517 case 4:
1518 stl_raw(addr, val);
1519 break;
1520 case 8:
1521 default:
1522 stq_raw(addr, val);
1523 break;
1524 }
1525 }
1526 break;
1527 case 0x81: // Secondary
1528 case 0x89: // Secondary LE
1529 // XXX
1530 return;
1531
1532 case 0x82: // Primary no-fault, RO
1533 case 0x83: // Secondary no-fault, RO
1534 case 0x8a: // Primary no-fault LE, RO
1535 case 0x8b: // Secondary no-fault LE, RO
1536 default:
1537 do_unassigned_access(addr, 1, 0, 1);
1538 return;
1539 }
1540 }
1541
1542 #else /* CONFIG_USER_ONLY */
1543
1544 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1545 {
1546 uint64_t ret = 0;
1547 #if defined(DEBUG_ASI)
1548 target_ulong last_addr = addr;
1549 #endif
1550
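/* ASIs below 0x80 are restricted: they require supervisor privilege, and
   the 0x30-0x7f range additionally requires hyperprivileged mode. */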
1551 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1552 || (asi >= 0x30 && asi < 0x80 && !(env->hpstate & HS_PRIV)))
1553 raise_exception(TT_PRIV_ACT);
1554
1555 helper_check_align(addr, size - 1);
1556 switch (asi) {
1557 case 0x10: // As if user primary
1558 case 0x18: // As if user primary LE
1559 case 0x80: // Primary
1560 case 0x82: // Primary no-fault
1561 case 0x88: // Primary LE
1562 case 0x8a: // Primary no-fault LE
1563 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1564 if (env->hpstate & HS_PRIV) {
1565 switch(size) {
1566 case 1:
1567 ret = ldub_hypv(addr);
1568 break;
1569 case 2:
1570 ret = lduw_hypv(addr);
1571 break;
1572 case 4:
1573 ret = ldl_hypv(addr);
1574 break;
1575 default:
1576 case 8:
1577 ret = ldq_hypv(addr);
1578 break;
1579 }
1580 } else {
1581 switch(size) {
1582 case 1:
1583 ret = ldub_kernel(addr);
1584 break;
1585 case 2:
1586 ret = lduw_kernel(addr);
1587 break;
1588 case 4:
1589 ret = ldl_kernel(addr);
1590 break;
1591 default:
1592 case 8:
1593 ret = ldq_kernel(addr);
1594 break;
1595 }
1596 }
1597 } else {
1598 switch(size) {
1599 case 1:
1600 ret = ldub_user(addr);
1601 break;
1602 case 2:
1603 ret = lduw_user(addr);
1604 break;
1605 case 4:
1606 ret = ldl_user(addr);
1607 break;
1608 default:
1609 case 8:
1610 ret = ldq_user(addr);
1611 break;
1612 }
1613 }
1614 break;
1615 case 0x14: // Bypass
1616 case 0x15: // Bypass, non-cacheable
1617 case 0x1c: // Bypass LE
1618 case 0x1d: // Bypass, non-cacheable LE
1619 {
1620 switch(size) {
1621 case 1:
1622 ret = ldub_phys(addr);
1623 break;
1624 case 2:
1625 ret = lduw_phys(addr);
1626 break;
1627 case 4:
1628 ret = ldl_phys(addr);
1629 break;
1630 default:
1631 case 8:
1632 ret = ldq_phys(addr);
1633 break;
1634 }
1635 break;
1636 }
1637 case 0x04: // Nucleus
1638 case 0x0c: // Nucleus Little Endian (LE)
1639 case 0x11: // As if user secondary
1640 case 0x19: // As if user secondary LE
1641 case 0x24: // Nucleus quad LDD 128 bit atomic
1642 case 0x2c: // Nucleus quad LDD 128 bit atomic
1643 case 0x4a: // UPA config
1644 case 0x81: // Secondary
1645 case 0x83: // Secondary no-fault
1646 case 0x89: // Secondary LE
1647 case 0x8b: // Secondary no-fault LE
1648 // XXX
1649 break;
1650 case 0x45: // LSU
1651 ret = env->lsu;
1652 break;
1653 case 0x50: // I-MMU regs
1654 {
1655 int reg = (addr >> 3) & 0xf;
1656
1657 ret = env->immuregs[reg];
1658 break;
1659 }
1660 case 0x51: // I-MMU 8k TSB pointer
1661 case 0x52: // I-MMU 64k TSB pointer
1662 // XXX
1663 break;
1664 case 0x55: // I-MMU data access
1665 {
1666 int reg = (addr >> 3) & 0x3f;
1667
1668 ret = env->itlb_tte[reg];
1669 break;
1670 }
1671 case 0x56: // I-MMU tag read
1672 {
1673 unsigned int i;
1674
1675 for (i = 0; i < 64; i++) {
1676 // Valid, ctx match, vaddr match
1677 if ((env->itlb_tte[i] & 0x8000000000000000ULL) != 0) {
1678 uint64_t mask;
1679
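/* The TTE size field (bits 62:61) selects the page size (8K, 64K, 512K
   or 4M); larger pages ignore more low-order address bits in the tag
   comparison, hence the widening mask below. */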
1680 switch ((env->itlb_tte[i] >> 61) & 3) {
1681 default:
1682 case 0x0:
1683 mask = 0xffffffffffffffff;
1684 break;
1685 case 0x1:
1686 mask = 0xffffffffffff0fff;
1687 break;
1688 case 0x2:
1689 mask = 0xfffffffffff80fff;
1690 break;
1691 case 0x3:
1692 mask = 0xffffffffffc00fff;
1693 break;
1694 }
1695 if ((env->itlb_tag[i] & mask) == (addr & mask)) {
1696 ret = env->itlb_tte[i];
1697 break;
1698 }
1699 }
1700 }
1701 break;
1702 }
1703 case 0x58: // D-MMU regs
1704 {
1705 int reg = (addr >> 3) & 0xf;
1706
1707 ret = env->dmmuregs[reg];
1708 break;
1709 }
1710 case 0x5d: // D-MMU data access
1711 {
1712 int reg = (addr >> 3) & 0x3f;
1713
1714 ret = env->dtlb_tte[reg];
1715 break;
1716 }
1717 case 0x5e: // D-MMU tag read
1718 {
1719 unsigned int i;
1720
1721 for (i = 0; i < 64; i++) {
1722 // Valid, ctx match, vaddr match
1723 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) != 0) {
1724 uint64_t mask;
1725
1726 switch ((env->dtlb_tte[i] >> 61) & 3) {
1727 default:
1728 case 0x0:
1729 mask = 0xffffffffffffffff;
1730 break;
1731 case 0x1:
1732 mask = 0xffffffffffff0fff;
1733 break;
1734 case 0x2:
1735 mask = 0xfffffffffff80fff;
1736 break;
1737 case 0x3:
1738 mask = 0xffffffffffc00fff;
1739 break;
1740 }
1741 if ((env->dtlb_tag[i] & mask) == (addr & mask)) {
1742 ret = env->dtlb_tte[i];
1743 break;
1744 }
1745 }
1746 }
1747 break;
1748 }
1749 case 0x46: // D-cache data
1750 case 0x47: // D-cache tag access
1751 case 0x4b: // E-cache error enable
1752 case 0x4c: // E-cache asynchronous fault status
1753 case 0x4d: // E-cache asynchronous fault address
1754 case 0x4e: // E-cache tag data
1755 case 0x66: // I-cache instruction access
1756 case 0x67: // I-cache tag access
1757 case 0x6e: // I-cache predecode
1758 case 0x6f: // I-cache LRU etc.
1759 case 0x76: // E-cache tag
1760 case 0x7e: // E-cache tag
1761 break;
1762 case 0x59: // D-MMU 8k TSB pointer
1763 case 0x5a: // D-MMU 64k TSB pointer
1764 case 0x5b: // D-MMU data pointer
1765 case 0x48: // Interrupt dispatch, RO
1766 case 0x49: // Interrupt data receive
1767 case 0x7f: // Incoming interrupt vector, RO
1768 // XXX
1769 break;
1770 case 0x54: // I-MMU data in, WO
1771 case 0x57: // I-MMU demap, WO
1772 case 0x5c: // D-MMU data in, WO
1773 case 0x5f: // D-MMU demap, WO
1774 case 0x77: // Interrupt vector, WO
1775 default:
1776 do_unassigned_access(addr, 0, 0, 1);
1777 ret = 0;
1778 break;
1779 }
1780
1781 /* Convert from little endian */
1782 switch (asi) {
1783 case 0x0c: // Nucleus Little Endian (LE)
1784 case 0x18: // As if user primary LE
1785 case 0x19: // As if user secondary LE
1786 case 0x1c: // Bypass LE
1787 case 0x1d: // Bypass, non-cacheable LE
1788 case 0x88: // Primary LE
1789 case 0x89: // Secondary LE
1790 case 0x8a: // Primary no-fault LE
1791 case 0x8b: // Secondary no-fault LE
1792 switch(size) {
1793 case 2:
1794 ret = bswap16(ret);
1795 break;
1796 case 4:
1797 ret = bswap32(ret);
1798 break;
1799 case 8:
1800 ret = bswap64(ret);
1801 break;
1802 default:
1803 break;
1804 }
1805 default:
1806 break;
1807 }
1808
1809 /* Convert to signed number */
1810 if (sign) {
1811 switch(size) {
1812 case 1:
1813 ret = (int8_t) ret;
1814 break;
1815 case 2:
1816 ret = (int16_t) ret;
1817 break;
1818 case 4:
1819 ret = (int32_t) ret;
1820 break;
1821 default:
1822 break;
1823 }
1824 }
1825 #ifdef DEBUG_ASI
1826 dump_asi("read ", last_addr, asi, size, ret);
1827 #endif
1828 return ret;
1829 }
1830
1831 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1832 {
1833 #ifdef DEBUG_ASI
1834 dump_asi("write", addr, asi, size, val);
1835 #endif
1836 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1837 || (asi >= 0x30 && asi < 0x80 && !(env->hpstate & HS_PRIV)))
1838 raise_exception(TT_PRIV_ACT);
1839
1840 helper_check_align(addr, size - 1);
1841 /* Convert to little endian */
1842 switch (asi) {
1843 case 0x0c: // Nucleus Little Endian (LE)
1844 case 0x18: // As if user primary LE
1845 case 0x19: // As if user secondary LE
1846 case 0x1c: // Bypass LE
1847 case 0x1d: // Bypass, non-cacheable LE
1848 case 0x88: // Primary LE
1849 case 0x89: // Secondary LE
1850 switch(size) {
1851 case 2:
1852 val = bswap16(val);
1853 break;
1854 case 4:
1855 val = bswap32(val);
1856 break;
1857 case 8:
1858 val = bswap64(val);
1859 break;
1860 default:
1861 break;
1862 }
1863 default:
1864 break;
1865 }
1866
1867 switch(asi) {
1868 case 0x10: // As if user primary
1869 case 0x18: // As if user primary LE
1870 case 0x80: // Primary
1871 case 0x88: // Primary LE
1872 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1873 if (env->hpstate & HS_PRIV) {
1874 switch(size) {
1875 case 1:
1876 stb_hypv(addr, val);
1877 break;
1878 case 2:
1879 stw_hypv(addr, val);
1880 break;
1881 case 4:
1882 stl_hypv(addr, val);
1883 break;
1884 case 8:
1885 default:
1886 stq_hypv(addr, val);
1887 break;
1888 }
1889 } else {
1890 switch(size) {
1891 case 1:
1892 stb_kernel(addr, val);
1893 break;
1894 case 2:
1895 stw_kernel(addr, val);
1896 break;
1897 case 4:
1898 stl_kernel(addr, val);
1899 break;
1900 case 8:
1901 default:
1902 stq_kernel(addr, val);
1903 break;
1904 }
1905 }
1906 } else {
1907 switch(size) {
1908 case 1:
1909 stb_user(addr, val);
1910 break;
1911 case 2:
1912 stw_user(addr, val);
1913 break;
1914 case 4:
1915 stl_user(addr, val);
1916 break;
1917 case 8:
1918 default:
1919 stq_user(addr, val);
1920 break;
1921 }
1922 }
1923 break;
1924 case 0x14: // Bypass
1925 case 0x15: // Bypass, non-cacheable
1926 case 0x1c: // Bypass LE
1927 case 0x1d: // Bypass, non-cacheable LE
1928 {
1929 switch(size) {
1930 case 1:
1931 stb_phys(addr, val);
1932 break;
1933 case 2:
1934 stw_phys(addr, val);
1935 break;
1936 case 4:
1937 stl_phys(addr, val);
1938 break;
1939 case 8:
1940 default:
1941 stq_phys(addr, val);
1942 break;
1943 }
1944 }
1945 return;
1946 case 0x04: // Nucleus
1947 case 0x0c: // Nucleus Little Endian (LE)
1948 case 0x11: // As if user secondary
1949 case 0x19: // As if user secondary LE
1950 case 0x24: // Nucleus quad LDD 128 bit atomic
1951 case 0x2c: // Nucleus quad LDD 128 bit atomic
1952 case 0x4a: // UPA config
1953 case 0x81: // Secondary
1954 case 0x89: // Secondary LE
1955 // XXX
1956 return;
1957 case 0x45: // LSU
1958 {
1959 uint64_t oldreg;
1960
1961 oldreg = env->lsu;
1962 env->lsu = val & (DMMU_E | IMMU_E);
1963 // Mappings generated during D/I MMU disabled mode are
1964 // invalid in normal mode
1965 if (oldreg != env->lsu) {
1966 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1967 oldreg, env->lsu);
1968 #ifdef DEBUG_MMU
1969 dump_mmu(env);
1970 #endif
1971 tlb_flush(env, 1);
1972 }
1973 return;
1974 }
1975 case 0x50: // I-MMU regs
1976 {
1977 int reg = (addr >> 3) & 0xf;
1978 uint64_t oldreg;
1979
1980 oldreg = env->immuregs[reg];
1981 switch(reg) {
1982 case 0: // RO
1983 case 4:
1984 return;
1985 case 1: // Not in I-MMU
1986 case 2:
1987 case 7:
1988 case 8:
1989 return;
1990 case 3: // SFSR
1991 if ((val & 1) == 0)
1992 val = 0; // Clear SFSR
1993 break;
1994 case 5: // TSB access
1995 case 6: // Tag access
1996 default:
1997 break;
1998 }
1999 env->immuregs[reg] = val;
2000 if (oldreg != env->immuregs[reg]) {
2001 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2002 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
2003 }
2004 #ifdef DEBUG_MMU
2005 dump_mmu(env);
2006 #endif
2007 return;
2008 }
2009 case 0x54: // I-MMU data in
2010 {
2011 unsigned int i;
2012
2013 // Try finding an invalid entry
2014 for (i = 0; i < 64; i++) {
2015 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
2016 env->itlb_tag[i] = env->immuregs[6];
2017 env->itlb_tte[i] = val;
2018 return;
2019 }
2020 }
2021 // Try finding an unlocked entry
2022 for (i = 0; i < 64; i++) {
2023 if ((env->itlb_tte[i] & 0x40) == 0) {
2024 env->itlb_tag[i] = env->immuregs[6];
2025 env->itlb_tte[i] = val;
2026 return;
2027 }
2028 }
2029 // error state?
2030 return;
2031 }
2032 case 0x55: // I-MMU data access
2033 {
2034 unsigned int i = (addr >> 3) & 0x3f;
2035
2036 env->itlb_tag[i] = env->immuregs[6];
2037 env->itlb_tte[i] = val;
2038 return;
2039 }
2040 case 0x57: // I-MMU demap
2041 // XXX
2042 return;
2043 case 0x58: // D-MMU regs
2044 {
2045 int reg = (addr >> 3) & 0xf;
2046 uint64_t oldreg;
2047
2048 oldreg = env->dmmuregs[reg];
2049 switch(reg) {
2050 case 0: // RO
2051 case 4:
2052 return;
2053 case 3: // SFSR
2054 if ((val & 1) == 0) {
2055 val = 0; // Clear SFSR, Fault address
2056 env->dmmuregs[4] = 0;
2057 }
2058 env->dmmuregs[reg] = val;
2059 break;
2060 case 1: // Primary context
2061 case 2: // Secondary context
2062 case 5: // TSB access
2063 case 6: // Tag access
2064 case 7: // Virtual Watchpoint
2065 case 8: // Physical Watchpoint
2066 default:
2067 break;
2068 }
2069 env->dmmuregs[reg] = val;
2070 if (oldreg != env->dmmuregs[reg]) {
2071 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2072 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2073 }
2074 #ifdef DEBUG_MMU
2075 dump_mmu(env);
2076 #endif
2077 return;
2078 }
2079 case 0x5c: // D-MMU data in
2080 {
2081 unsigned int i;
2082
2083 // Try finding an invalid entry
2084 for (i = 0; i < 64; i++) {
2085 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2086 env->dtlb_tag[i] = env->dmmuregs[6];
2087 env->dtlb_tte[i] = val;
2088 return;
2089 }
2090 }
2091 // Try finding an unlocked entry
2092 for (i = 0; i < 64; i++) {
2093 if ((env->dtlb_tte[i] & 0x40) == 0) {
2094 env->dtlb_tag[i] = env->dmmuregs[6];
2095 env->dtlb_tte[i] = val;
2096 return;
2097 }
2098 }
2099 // error state?
2100 return;
2101 }
2102 case 0x5d: // D-MMU data access
2103 {
2104 unsigned int i = (addr >> 3) & 0x3f;
2105
2106 env->dtlb_tag[i] = env->dmmuregs[6];
2107 env->dtlb_tte[i] = val;
2108 return;
2109 }
2110 case 0x5f: // D-MMU demap
2111 case 0x49: // Interrupt data receive
2112 // XXX
2113 return;
2114 case 0x46: // D-cache data
2115 case 0x47: // D-cache tag access
2116 case 0x4b: // E-cache error enable
2117 case 0x4c: // E-cache asynchronous fault status
2118 case 0x4d: // E-cache asynchronous fault address
2119 case 0x4e: // E-cache tag data
2120 case 0x66: // I-cache instruction access
2121 case 0x67: // I-cache tag access
2122 case 0x6e: // I-cache predecode
2123 case 0x6f: // I-cache LRU etc.
2124 case 0x76: // E-cache tag
2125 case 0x7e: // E-cache tag
2126 return;
2127 case 0x51: // I-MMU 8k TSB pointer, RO
2128 case 0x52: // I-MMU 64k TSB pointer, RO
2129 case 0x56: // I-MMU tag read, RO
2130 case 0x59: // D-MMU 8k TSB pointer, RO
2131 case 0x5a: // D-MMU 64k TSB pointer, RO
2132 case 0x5b: // D-MMU data pointer, RO
2133 case 0x5e: // D-MMU tag read, RO
2134 case 0x48: // Interrupt dispatch, RO
2135 case 0x7f: // Incoming interrupt vector, RO
2136 case 0x82: // Primary no-fault, RO
2137 case 0x83: // Secondary no-fault, RO
2138 case 0x8a: // Primary no-fault LE, RO
2139 case 0x8b: // Secondary no-fault LE, RO
2140 default:
2141 do_unassigned_access(addr, 1, 0, 1);
2142 return;
2143 }
2144 }
2145 #endif /* CONFIG_USER_ONLY */
2146
2147 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2148 {
2149 unsigned int i;
2150 target_ulong val;
2151
2152 helper_check_align(addr, 3);
2153 switch (asi) {
2154 case 0xf0: // Block load primary
2155 case 0xf1: // Block load secondary
2156 case 0xf8: // Block load primary LE
2157 case 0xf9: // Block load secondary LE
2158 if (rd & 7) {
2159 raise_exception(TT_ILL_INSN);
2160 return;
2161 }
2162 helper_check_align(addr, 0x3f);
2163 for (i = 0; i < 16; i++) {
2164 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2165 0);
2166 addr += 4;
2167 }
2168
2169 return;
2170 default:
2171 break;
2172 }
2173
2174 val = helper_ld_asi(addr, asi, size, 0);
2175 switch(size) {
2176 default:
2177 case 4:
2178 *((uint32_t *)&FT0) = val;
2179 break;
2180 case 8:
2181 *((int64_t *)&DT0) = val;
2182 break;
2183 case 16:
2184 // XXX
2185 break;
2186 }
2187 }
2188
2189 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2190 {
2191 unsigned int i;
2192 target_ulong val = 0;
2193
2194 helper_check_align(addr, 3);
2195 switch (asi) {
2196 case 0xf0: // Block store primary
2197 case 0xf1: // Block store secondary
2198 case 0xf8: // Block store primary LE
2199 case 0xf9: // Block store secondary LE
2200 if (rd & 7) {
2201 raise_exception(TT_ILL_INSN);
2202 return;
2203 }
2204 helper_check_align(addr, 0x3f);
2205 for (i = 0; i < 16; i++) {
2206 val = *(uint32_t *)&env->fpr[rd++];
2207 helper_st_asi(addr, val, asi & 0x8f, 4);
2208 addr += 4;
2209 }
2210
2211 return;
2212 default:
2213 break;
2214 }
2215
2216 switch(size) {
2217 default:
2218 case 4:
2219 val = *((uint32_t *)&FT0);
2220 break;
2221 case 8:
2222 val = *((int64_t *)&DT0);
2223 break;
2224 case 16:
2225 // XXX
2226 break;
2227 }
2228 helper_st_asi(addr, val, asi, size);
2229 }
2230
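/* CASA: load the 32-bit word at addr, compare it with val1 and, if they
   match, store val2; the old memory value is always returned. */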
2231 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2232 target_ulong val2, uint32_t asi)
2233 {
2234 target_ulong ret;
2235
2236 val1 &= 0xffffffffUL;
2237 ret = helper_ld_asi(addr, asi, 4, 0);
2238 ret &= 0xffffffffUL;
2239 if (val1 == ret)
2240 helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
2241 return ret;
2242 }
2243
2244 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2245 target_ulong val2, uint32_t asi)
2246 {
2247 target_ulong ret;
2248
2249 ret = helper_ld_asi(addr, asi, 8, 0);
2250 if (val1 == ret)
2251 helper_st_asi(addr, val2, asi, 8);
2252 return ret;
2253 }
2254 #endif /* TARGET_SPARC64 */
2255
2256 #ifndef TARGET_SPARC64
2257 void helper_rett(void)
2258 {
2259 unsigned int cwp;
2260
2261 if (env->psret == 1)
2262 raise_exception(TT_ILL_INSN);
2263
2264 env->psret = 1;
2265 cwp = cpu_cwp_inc(env, env->cwp + 1);
2266 if (env->wim & (1 << cwp)) {
2267 raise_exception(TT_WIN_UNF);
2268 }
2269 set_cwp(cwp);
2270 env->psrs = env->psrps;
2271 }
2272 #endif
2273
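/* V8 integer divide: the 64-bit dividend is formed from Y (high word) and
   the rs1 operand (low word); on overflow the result saturates and
   cc_src2 records the condition so the CC-setting divide forms can derive
   the overflow flag. */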
2274 target_ulong helper_udiv(target_ulong a, target_ulong b)
2275 {
2276 uint64_t x0;
2277 uint32_t x1;
2278
2279 x0 = a | ((uint64_t) (env->y) << 32);
2280 x1 = b;
2281
2282 if (x1 == 0) {
2283 raise_exception(TT_DIV_ZERO);
2284 }
2285
2286 x0 = x0 / x1;
2287 if (x0 > 0xffffffff) {
2288 env->cc_src2 = 1;
2289 return 0xffffffff;
2290 } else {
2291 env->cc_src2 = 0;
2292 return x0;
2293 }
2294 }
2295
2296 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2297 {
2298 int64_t x0;
2299 int32_t x1;
2300
2301 x0 = a | ((int64_t) (env->y) << 32);
2302 x1 = b;
2303
2304 if (x1 == 0) {
2305 raise_exception(TT_DIV_ZERO);
2306 }
2307
2308 x0 = x0 / x1;
2309 if ((int32_t) x0 != x0) {
2310 env->cc_src2 = 1;
2311 return x0 < 0? 0x80000000: 0x7fffffff;
2312 } else {
2313 env->cc_src2 = 0;
2314 return x0;
2315 }
2316 }
2317
2318 uint64_t helper_pack64(target_ulong high, target_ulong low)
2319 {
2320 return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
2321 }
2322
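/* mem_idx selects the MMU index used for the FP load/store helpers below:
   0 = user, 1 = supervisor, and (SPARC64 only) 2 = hypervisor. */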
2323 void helper_stdf(target_ulong addr, int mem_idx)
2324 {
2325 helper_check_align(addr, 7);
2326 #if !defined(CONFIG_USER_ONLY)
2327 switch (mem_idx) {
2328 case 0:
2329 stfq_user(addr, DT0);
2330 break;
2331 case 1:
2332 stfq_kernel(addr, DT0);
2333 break;
2334 #ifdef TARGET_SPARC64
2335 case 2:
2336 stfq_hypv(addr, DT0);
2337 break;
2338 #endif
2339 default:
2340 break;
2341 }
2342 #else
2343 address_mask(env, &addr);
2344 stfq_raw(addr, DT0);
2345 #endif
2346 }
2347
2348 void helper_lddf(target_ulong addr, int mem_idx)
2349 {
2350 helper_check_align(addr, 7);
2351 #if !defined(CONFIG_USER_ONLY)
2352 switch (mem_idx) {
2353 case 0:
2354 DT0 = ldfq_user(addr);
2355 break;
2356 case 1:
2357 DT0 = ldfq_kernel(addr);
2358 break;
2359 #ifdef TARGET_SPARC64
2360 case 2:
2361 DT0 = ldfq_hypv(addr);
2362 break;
2363 #endif
2364 default:
2365 break;
2366 }
2367 #else
2368 address_mask(env, &addr);
2369 DT0 = ldfq_raw(addr);
2370 #endif
2371 }
2372
2373 void helper_ldqf(target_ulong addr, int mem_idx)
2374 {
2375 // XXX add 128 bit load
2376 CPU_QuadU u;
2377
2378 helper_check_align(addr, 7);
2379 #if !defined(CONFIG_USER_ONLY)
2380 switch (mem_idx) {
2381 case 0:
2382 u.ll.upper = ldq_user(addr);
2383 u.ll.lower = ldq_user(addr + 8);
2384 QT0 = u.q;
2385 break;
2386 case 1:
2387 u.ll.upper = ldq_kernel(addr);
2388 u.ll.lower = ldq_kernel(addr + 8);
2389 QT0 = u.q;
2390 break;
2391 #ifdef TARGET_SPARC64
2392 case 2:
2393 u.ll.upper = ldq_hypv(addr);
2394 u.ll.lower = ldq_hypv(addr + 8);
2395 QT0 = u.q;
2396 break;
2397 #endif
2398 default:
2399 break;
2400 }
2401 #else
2402 address_mask(env, &addr);
2403 u.ll.upper = ldq_raw(addr);
2404 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2405 QT0 = u.q;
2406 #endif
2407 }
2408
2409 void helper_stqf(target_ulong addr, int mem_idx)
2410 {
2411 // XXX add 128 bit store
2412 CPU_QuadU u;
2413
2414 helper_check_align(addr, 7);
2415 #if !defined(CONFIG_USER_ONLY)
2416 switch (mem_idx) {
2417 case 0:
2418 u.q = QT0;
2419 stq_user(addr, u.ll.upper);
2420 stq_user(addr + 8, u.ll.lower);
2421 break;
2422 case 1:
2423 u.q = QT0;
2424 stq_kernel(addr, u.ll.upper);
2425 stq_kernel(addr + 8, u.ll.lower);
2426 break;
2427 #ifdef TARGET_SPARC64
2428 case 2:
2429 u.q = QT0;
2430 stq_hypv(addr, u.ll.upper);
2431 stq_hypv(addr + 8, u.ll.lower);
2432 break;
2433 #endif
2434 default:
2435 break;
2436 }
2437 #else
2438 u.q = QT0;
2439 address_mask(env, &addr);
2440 stq_raw(addr, u.ll.upper);
2441 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2442 #endif
2443 }
2444
2445 void helper_ldfsr(void)
2446 {
2447 int rnd_mode;
2448
2449 PUT_FSR32(env, *((uint32_t *) &FT0));
2450 switch (env->fsr & FSR_RD_MASK) {
2451 case FSR_RD_NEAREST:
2452 rnd_mode = float_round_nearest_even;
2453 break;
2454 default:
2455 case FSR_RD_ZERO:
2456 rnd_mode = float_round_to_zero;
2457 break;
2458 case FSR_RD_POS:
2459 rnd_mode = float_round_up;
2460 break;
2461 case FSR_RD_NEG:
2462 rnd_mode = float_round_down;
2463 break;
2464 }
2465 set_float_rounding_mode(rnd_mode, &env->fp_status);
2466 }
2467
2468 void helper_stfsr(void)
2469 {
2470 *((uint32_t *) &FT0) = GET_FSR32(env);
2471 }
2472
2473 void helper_debug(void)
2474 {
2475 env->exception_index = EXCP_DEBUG;
2476 cpu_loop_exit();
2477 }
2478
2479 #ifndef TARGET_SPARC64
2480 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2481 handling ? */
2482 void helper_save(void)
2483 {
2484 uint32_t cwp;
2485
2486 cwp = cpu_cwp_dec(env, env->cwp - 1);
2487 if (env->wim & (1 << cwp)) {
2488 raise_exception(TT_WIN_OVF);
2489 }
2490 set_cwp(cwp);
2491 }
2492
2493 void helper_restore(void)
2494 {
2495 uint32_t cwp;
2496
2497 cwp = cpu_cwp_inc(env, env->cwp + 1);
2498 if (env->wim & (1 << cwp)) {
2499 raise_exception(TT_WIN_UNF);
2500 }
2501 set_cwp(cwp);
2502 }
2503
2504 void helper_wrpsr(target_ulong new_psr)
2505 {
2506 if ((new_psr & PSR_CWP) >= env->nwindows)
2507 raise_exception(TT_ILL_INSN);
2508 else
2509 PUT_PSR(env, new_psr);
2510 }
2511
2512 target_ulong helper_rdpsr(void)
2513 {
2514 return GET_PSR(env);
2515 }
2516
2517 #else
2518 /* XXX: use a separate pointer for the %iN registers to avoid the
2519 slow window-wrapping handling? */
2520 void helper_save(void)
2521 {
2522 uint32_t cwp;
2523
2524 cwp = cpu_cwp_dec(env, env->cwp - 1);
2525 if (env->cansave == 0) {
2526 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2527 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2528 ((env->wstate & 0x7) << 2)));
2529 } else {
2530 if (env->cleanwin - env->canrestore == 0) {
2531 // XXX: clean the windows here instead of trapping
2532 raise_exception(TT_CLRWIN);
2533 } else {
2534 env->cansave--;
2535 env->canrestore++;
2536 set_cwp(cwp);
2537 }
2538 }
2539 }
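/* How the trap vector above is built (following the V9 spill/fill trap
   layout assumed by the TT_SPILL/TT_FILL/TT_WOTHER constants in cpu.h):
   a normal spill traps to TT_SPILL + (WSTATE.NORMAL << 2), i.e.
   ((wstate & 0x7) << 2); when OTHERWIN != 0 the "other" vector
   TT_SPILL + TT_WOTHER + (WSTATE.OTHER << 2) is used instead, with
   WSTATE.OTHER in bits 5:3 of WSTATE, so
   ((wstate & 0x38) >> 3) << 2 == (wstate & 0x38) >> 1. */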
2540
2541 void helper_restore(void)
2542 {
2543 uint32_t cwp;
2544
2545 cwp = cpu_cwp_inc(env, env->cwp + 1);
2546 if (env->canrestore == 0) {
2547 raise_exception(TT_FILL | (env->otherwin != 0 ?
2548 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2549 ((env->wstate & 0x7) << 2)));
2550 } else {
2551 env->cansave++;
2552 env->canrestore--;
2553 set_cwp(cwp);
2554 }
2555 }
2556
2557 void helper_flushw(void)
2558 {
2559 if (env->cansave != env->nwindows - 2) {
2560 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2561 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2562 ((env->wstate & 0x7) << 2)));
2563 }
2564 }
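/* Rationale for the check above: SPARC V9 keeps the invariant
   CANSAVE + CANRESTORE + OTHERWIN == NWINDOWS - 2, so cansave ==
   nwindows - 2 means every other window is already spilled and FLUSHW is
   a no-op; otherwise the oldest occupied window must take a spill trap. */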
2565
2566 void helper_saved(void)
2567 {
2568 env->cansave++;
2569 if (env->otherwin == 0)
2570 env->canrestore--;
2571 else
2572 env->otherwin--;
2573 }
2574
2575 void helper_restored(void)
2576 {
2577 env->canrestore++;
2578 if (env->cleanwin < env->nwindows - 1)
2579 env->cleanwin++;
2580 if (env->otherwin == 0)
2581 env->cansave--;
2582 else
2583 env->otherwin--;
2584 }
2585
2586 target_ulong helper_rdccr(void)
2587 {
2588 return GET_CCR(env);
2589 }
2590
2591 void helper_wrccr(target_ulong new_ccr)
2592 {
2593 PUT_CCR(env, new_ccr);
2594 }
2595
2596 // CWP handling is reversed in V9, but we still use the V8 register
2597 // order.
2598 target_ulong helper_rdcwp(void)
2599 {
2600 return GET_CWP64(env);
2601 }
2602
2603 void helper_wrcwp(target_ulong new_cwp)
2604 {
2605 PUT_CWP64(env, new_cwp);
2606 }
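/* Illustrative note, assuming GET_CWP64/PUT_CWP64 in cpu.h map the V9 CWP
   to nwindows - 1 - cwp: with 8 windows, a V9 CWP of 0 selects internal
   (V8-ordered) window 7 and vice versa, so rdcwp/wrcwp present V9
   numbering while the register file itself stays in V8 order. */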
2607
2608 // This macro uses non-native bit numbering: bit 0 is the most significant bit
2609 #define GET_FIELD(X, FROM, TO) \
2610 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2611
2612 // This macro uses the bit order of the manuals, i.e. bit 0 is 2^0
2613 #define GET_FIELD_SP(X, FROM, TO) \
2614 GET_FIELD(X, 63 - (TO), 63 - (FROM))
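/* A small illustration of the two bit-order conventions (a sketch only;
   bitorder_example() is not part of the original helpers and is unused by
   the emulation): */
static inline int bitorder_example(uint64_t x)
{
    /* GET_FIELD counts from the MSB: field (0, 0) is bit 2^63 */
    /* GET_FIELD_SP counts from bit 0 = 2^0: (11, 12) expands to
       GET_FIELD(x, 51, 52) == (x >> 11) & 3, i.e. bits 12..11 */
    return GET_FIELD(x, 0, 0) == ((x >> 63) & 1) &&
           GET_FIELD_SP(x, 11, 12) == ((x >> 11) & 3);
}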
2615
2616 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2617 {
2618 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2619 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2620 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2621 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2622 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2623 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2624 (((pixel_addr >> 55) & 1) << 4) |
2625 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2626 GET_FIELD_SP(pixel_addr, 11, 12);
2627 }
2628
2629 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2630 {
2631 uint64_t tmp;
2632
2633 tmp = addr + offset;
2634 env->gsr &= ~7ULL;
2635 env->gsr |= tmp & 7ULL;
2636 return tmp & ~7ULL;
2637 }
2638
2639 target_ulong helper_popc(target_ulong val)
2640 {
2641 return ctpop64(val);
2642 }
2643
2644 static inline uint64_t *get_gregset(uint64_t pstate)
2645 {
2646 switch (pstate) {
2647 default:
2648 case 0:
2649 return env->bgregs;
2650 case PS_AG:
2651 return env->agregs;
2652 case PS_MG:
2653 return env->mgregs;
2654 case PS_IG:
2655 return env->igregs;
2656 }
2657 }
2658
2659 void change_pstate(uint64_t new_pstate)
2660 {
2661 uint64_t pstate_regs, new_pstate_regs;
2662 uint64_t *src, *dst;
2663
2664 pstate_regs = env->pstate & 0xc01;
2665 new_pstate_regs = new_pstate & 0xc01;
2666 if (new_pstate_regs != pstate_regs) {
2667 // Switch global register bank
2668 src = get_gregset(new_pstate_regs);
2669 dst = get_gregset(pstate_regs);
2670 memcpy32(dst, env->gregs);
2671 memcpy32(env->gregs, src);
2672 }
2673 env->pstate = new_pstate;
2674 }
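/* Note on the 0xc01 mask above: it keeps only PS_AG, PS_MG and PS_IG, the
   PSTATE bits that select an alternate global register set (see
   get_gregset() above), so the bank switch runs only when the selected
   set actually changes. */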
2675
2676 void helper_wrpstate(target_ulong new_state)
2677 {
2678 change_pstate(new_state & 0xf3f);
2679 }
2680
2681 void helper_done(void)
2682 {
2683 env->pc = env->tsptr->tpc;
2684 env->npc = env->tsptr->tnpc + 4;
2685 PUT_CCR(env, env->tsptr->tstate >> 32);
2686 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2687 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2688 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2689 env->tl--;
2690 env->tsptr = &env->ts[env->tl];
2691 }
2692
2693 void helper_retry(void)
2694 {
2695 env->pc = env->tsptr->tpc;
2696 env->npc = env->tsptr->tnpc;
2697 PUT_CCR(env, env->tsptr->tstate >> 32);
2698 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2699 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2700 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2701 env->tl--;
2702 env->tsptr = &env->ts[env->tl];
2703 }
2704 #endif
2705
2706 void cpu_set_cwp(CPUState *env1, int new_cwp)
2707 {
2708 /* copy the modified wrap-around registers back to their proper location */
2709 if (env1->cwp == env1->nwindows - 1)
2710 memcpy32(env1->regbase, env1->regbase + env1->nwindows * 16);
2711 env1->cwp = new_cwp;
2712 /* copy the wrap-around registers to their temporary location */
2713 if (new_cwp == env1->nwindows - 1)
2714 memcpy32(env1->regbase + env1->nwindows * 16, env1->regbase);
2715 env1->regwptr = env1->regbase + (new_cwp * 16);
2716 }
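/* Layout note (assuming the regbase/regwptr scheme from cpu.h): each
   window occupies 16 slots at regbase + cwp * 16 (%o0-%o7 then %l0-%l7),
   and a window's %i registers are simply the next window's %o slots. For
   the highest window those slots would run past the end of the array, so
   an 8-register scratch area at regbase + nwindows * 16 holds them while
   that window is current; the two memcpy32() calls above copy this
   overlap back and forth. */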
2717
2718 void set_cwp(int new_cwp)
2719 {
2720 cpu_set_cwp(env, new_cwp);
2721 }
2722
2723 void helper_flush(target_ulong addr)
2724 {
2725 addr &= ~7;
2726 tb_invalidate_page_range(addr, addr + 8);
2727 }
2728
2729 #if !defined(CONFIG_USER_ONLY)
2730
2731 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2732 void *retaddr);
2733
2734 #define MMUSUFFIX _mmu
2735 #define ALIGNED_ONLY
2736
2737 #define SHIFT 0
2738 #include "softmmu_template.h"
2739
2740 #define SHIFT 1
2741 #include "softmmu_template.h"
2742
2743 #define SHIFT 2
2744 #include "softmmu_template.h"
2745
2746 #define SHIFT 3
2747 #include "softmmu_template.h"
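/* Each inclusion of softmmu_template.h above instantiates the slow-path
   load/store helpers for one access size (SHIFT is the log2 of the size:
   1, 2, 4 and 8 bytes), using the aligned-only, _mmu-suffixed variants
   selected by the two defines before the first include. */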
2748
2749 /* XXX: make this generic? */
2750 static void cpu_restore_state2(void *retaddr)
2751 {
2752 TranslationBlock *tb;
2753 unsigned long pc;
2754
2755 if (retaddr) {
2756 /* now we have a real cpu fault */
2757 pc = (unsigned long)retaddr;
2758 tb = tb_find_pc(pc);
2759 if (tb) {
2760 /* the PC is inside the translated code. It means that we have
2761 a virtual CPU fault */
2762 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
2763 }
2764 }
2765 }
2766
2767 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2768 void *retaddr)
2769 {
2770 #ifdef DEBUG_UNALIGNED
2771 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2772 "\n", addr, env->pc);
2773 #endif
2774 cpu_restore_state2(retaddr);
2775 raise_exception(TT_UNALIGNED);
2776 }
2777
2778 /* try to fill the TLB and raise an exception on error. If retaddr is
2779 NULL, the function was called from C code (i.e. not from generated
2780 code or from helper.c) */
2781 /* XXX: fix it to restore all registers */
2782 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2783 {
2784 int ret;
2785 CPUState *saved_env;
2786
2787 /* XXX: hack to restore env in all cases, even if not called from
2788 generated code */
2789 saved_env = env;
2790 env = cpu_single_env;
2791
2792 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2793 if (ret) {
2794 cpu_restore_state2(retaddr);
2795 cpu_loop_exit();
2796 }
2797 env = saved_env;
2798 }
2799
2800 #endif
2801
2802 #ifndef TARGET_SPARC64
2803 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2804 int is_asi)
2805 {
2806 CPUState *saved_env;
2807
2808 /* XXX: hack to restore env in all cases, even if not called from
2809 generated code */
2810 saved_env = env;
2811 env = cpu_single_env;
2812 #ifdef DEBUG_UNASSIGNED
2813 if (is_asi)
2814 printf("Unassigned mem %s access to " TARGET_FMT_plx
2815 " asi 0x%02x from " TARGET_FMT_lx "\n",
2816 is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
2817 env->pc);
2818 else
2819 printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
2820 TARGET_FMT_lx "\n",
2821 is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
2822 #endif
2823 if (env->mmuregs[3]) /* Fault status register already valid? */
2824 env->mmuregs[3] = 1; /* overflow: FSR was not read before another fault */
2825 if (is_asi)
2826 env->mmuregs[3] |= 1 << 16;
2827 if (env->psrs)
2828 env->mmuregs[3] |= 1 << 5;
2829 if (is_exec)
2830 env->mmuregs[3] |= 1 << 6;
2831 if (is_write)
2832 env->mmuregs[3] |= 1 << 7;
2833 env->mmuregs[3] |= (5 << 2) | 2;
2834 env->mmuregs[4] = addr; /* Fault address register */
2835 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2836 if (is_exec)
2837 raise_exception(TT_CODE_ACCESS);
2838 else
2839 raise_exception(TT_DATA_ACCESS);
2840 }
2841 env = saved_env;
2842 }
2843 #else
2844 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2845 int is_asi)
2846 {
2847 #ifdef DEBUG_UNASSIGNED
2848 CPUState *saved_env;
2849
2850 /* XXX: hack to restore env in all cases, even if not called from
2851 generated code */
2852 saved_env = env;
2853 env = cpu_single_env;
2854 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2855 "\n", addr, env->pc);
2856 env = saved_env;
2857 #endif
2858 if (is_exec)
2859 raise_exception(TT_CODE_ACCESS);
2860 else
2861 raise_exception(TT_DATA_ACCESS);
2862 }
2863 #endif
2864