qemu.git blob: target-sparc/op_helper.c (commit: Use initial CPU definition structure for some CPU fields instead of copying)
1 #include "exec.h"
2 #include "host-utils.h"
3 #include "helper.h"
4 #if !defined(CONFIG_USER_ONLY)
5 #include "softmmu_exec.h"
6 #endif /* !defined(CONFIG_USER_ONLY) */
7
8 //#define DEBUG_MMU
9 //#define DEBUG_MXCC
10 //#define DEBUG_UNALIGNED
11 //#define DEBUG_UNASSIGNED
12 //#define DEBUG_ASI
13
14 #ifdef DEBUG_MMU
15 #define DPRINTF_MMU(fmt, args...) \
16 do { printf("MMU: " fmt , ##args); } while (0)
17 #else
18 #define DPRINTF_MMU(fmt, args...) do {} while (0)
19 #endif
20
21 #ifdef DEBUG_MXCC
22 #define DPRINTF_MXCC(fmt, args...) \
23 do { printf("MXCC: " fmt , ##args); } while (0)
24 #else
25 #define DPRINTF_MXCC(fmt, args...) do {} while (0)
26 #endif
27
28 #ifdef DEBUG_ASI
29 #define DPRINTF_ASI(fmt, args...) \
30 do { printf("ASI: " fmt , ##args); } while (0)
31 #else
32 #define DPRINTF_ASI(fmt, args...) do {} while (0)
33 #endif
34
35 #ifdef TARGET_SPARC64
36 #ifndef TARGET_ABI32
37 #define AM_CHECK(env1) ((env1)->pstate & PS_AM)
38 #else
39 #define AM_CHECK(env1) (1)
40 #endif
41 #endif
42
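/* On SPARC V9, PSTATE.AM forces effective addresses to be truncated to
   their low 32 bits; for the 32-bit ABI user emulation the truncation is
   applied unconditionally (AM_CHECK is always true there). */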
43 static inline void address_mask(CPUState *env1, target_ulong *addr)
44 {
45 #ifdef TARGET_SPARC64
46 if (AM_CHECK(env1))
47 *addr &= 0xffffffffULL;
48 #endif
49 }
50
51 void raise_exception(int tt)
52 {
53 env->exception_index = tt;
54 cpu_loop_exit();
55 }
56
57 void helper_trap(target_ulong nb_trap)
58 {
59 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
60 cpu_loop_exit();
61 }
62
63 void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
64 {
65 if (do_trap) {
66 env->exception_index = TT_TRAP + (nb_trap & 0x7f);
67 cpu_loop_exit();
68 }
69 }
70
71 void helper_check_align(target_ulong addr, uint32_t align)
72 {
73 if (addr & align) {
74 #ifdef DEBUG_UNALIGNED
75 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
76 "\n", addr, env->pc);
77 #endif
78 raise_exception(TT_UNALIGNED);
79 }
80 }
81
82 #define F_HELPER(name, p) void helper_f##name##p(void)
83
84 #define F_BINOP(name) \
85 F_HELPER(name, s) \
86 { \
87 FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
88 } \
89 F_HELPER(name, d) \
90 { \
91 DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
92 } \
93 F_HELPER(name, q) \
94 { \
95 QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
96 }
97
98 F_BINOP(add);
99 F_BINOP(sub);
100 F_BINOP(mul);
101 F_BINOP(div);
102 #undef F_BINOP
103
104 void helper_fsmuld(void)
105 {
106 DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
107 float32_to_float64(FT1, &env->fp_status),
108 &env->fp_status);
109 }
110
111 void helper_fdmulq(void)
112 {
113 QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
114 float64_to_float128(DT1, &env->fp_status),
115 &env->fp_status);
116 }
117
118 F_HELPER(neg, s)
119 {
120 FT0 = float32_chs(FT1);
121 }
122
123 #ifdef TARGET_SPARC64
124 F_HELPER(neg, d)
125 {
126 DT0 = float64_chs(DT1);
127 }
128
129 F_HELPER(neg, q)
130 {
131 QT0 = float128_chs(QT1);
132 }
133 #endif
134
135 /* Integer to float conversion. */
136 F_HELPER(ito, s)
137 {
138 FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
139 }
140
141 F_HELPER(ito, d)
142 {
143 DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
144 }
145
146 F_HELPER(ito, q)
147 {
148 QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
149 }
150
151 #ifdef TARGET_SPARC64
152 F_HELPER(xto, s)
153 {
154 FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
155 }
156
157 F_HELPER(xto, d)
158 {
159 DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
160 }
161
162 F_HELPER(xto, q)
163 {
164 QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
165 }
166 #endif
167 #undef F_HELPER
168
169 /* floating point conversion */
170 void helper_fdtos(void)
171 {
172 FT0 = float64_to_float32(DT1, &env->fp_status);
173 }
174
175 void helper_fstod(void)
176 {
177 DT0 = float32_to_float64(FT1, &env->fp_status);
178 }
179
180 void helper_fqtos(void)
181 {
182 FT0 = float128_to_float32(QT1, &env->fp_status);
183 }
184
185 void helper_fstoq(void)
186 {
187 QT0 = float32_to_float128(FT1, &env->fp_status);
188 }
189
190 void helper_fqtod(void)
191 {
192 DT0 = float128_to_float64(QT1, &env->fp_status);
193 }
194
195 void helper_fdtoq(void)
196 {
197 QT0 = float64_to_float128(DT1, &env->fp_status);
198 }
199
200 /* Float to integer conversion. */
201 void helper_fstoi(void)
202 {
203 *((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
204 }
205
206 void helper_fdtoi(void)
207 {
208 *((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
209 }
210
211 void helper_fqtoi(void)
212 {
213 *((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
214 }
215
216 #ifdef TARGET_SPARC64
217 void helper_fstox(void)
218 {
219 *((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
220 }
221
222 void helper_fdtox(void)
223 {
224 *((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
225 }
226
227 void helper_fqtox(void)
228 {
229 *((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
230 }
231
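/* VIS faligndata: concatenate the two 64-bit inputs and extract eight
   bytes starting at the byte offset latched in GSR.align (the low three
   bits of %gsr, normally set by alignaddr). */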
232 void helper_faligndata(void)
233 {
234 uint64_t tmp;
235
236 tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
237 /* on many architectures a shift of 64 does nothing */
238 if ((env->gsr & 7) != 0) {
239 tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
240 }
241 *((uint64_t *)&DT0) = tmp;
242 }
243
244 void helper_movl_FT0_0(void)
245 {
246 *((uint32_t *)&FT0) = 0;
247 }
248
249 void helper_movl_DT0_0(void)
250 {
251 *((uint64_t *)&DT0) = 0;
252 }
253
254 void helper_movl_FT0_1(void)
255 {
256 *((uint32_t *)&FT0) = 0xffffffff;
257 }
258
259 void helper_movl_DT0_1(void)
260 {
261 *((uint64_t *)&DT0) = 0xffffffffffffffffULL;
262 }
263
264 void helper_fnot(void)
265 {
266 *(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
267 }
268
269 void helper_fnots(void)
270 {
271 *(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
272 }
273
274 void helper_fnor(void)
275 {
276 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
277 }
278
279 void helper_fnors(void)
280 {
281 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
282 }
283
284 void helper_for(void)
285 {
286 *(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
287 }
288
289 void helper_fors(void)
290 {
291 *(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
292 }
293
294 void helper_fxor(void)
295 {
296 *(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
297 }
298
299 void helper_fxors(void)
300 {
301 *(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
302 }
303
304 void helper_fand(void)
305 {
306 *(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
307 }
308
309 void helper_fands(void)
310 {
311 *(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
312 }
313
314 void helper_fornot(void)
315 {
316 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
317 }
318
319 void helper_fornots(void)
320 {
321 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
322 }
323
324 void helper_fandnot(void)
325 {
326 *(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
327 }
328
329 void helper_fandnots(void)
330 {
331 *(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
332 }
333
334 void helper_fnand(void)
335 {
336 *(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
337 }
338
339 void helper_fnands(void)
340 {
341 *(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
342 }
343
344 void helper_fxnor(void)
345 {
346 *(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
347 }
348
349 void helper_fxnors(void)
350 {
351 *(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
352 }
353
354 #ifdef WORDS_BIGENDIAN
355 #define VIS_B64(n) b[7 - (n)]
356 #define VIS_W64(n) w[3 - (n)]
357 #define VIS_SW64(n) sw[3 - (n)]
358 #define VIS_L64(n) l[1 - (n)]
359 #define VIS_B32(n) b[3 - (n)]
360 #define VIS_W32(n) w[1 - (n)]
361 #else
362 #define VIS_B64(n) b[n]
363 #define VIS_W64(n) w[n]
364 #define VIS_SW64(n) sw[n]
365 #define VIS_L64(n) l[n]
366 #define VIS_B32(n) b[n]
367 #define VIS_W32(n) w[n]
368 #endif
369
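/* Element accessors for the VIS helpers below: the VIS_* macros index
   union elements by significance (0 = least significant) so the same code
   works on big- and little-endian hosts. */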
370 typedef union {
371 uint8_t b[8];
372 uint16_t w[4];
373 int16_t sw[4];
374 uint32_t l[2];
375 float64 d;
376 } vis64;
377
378 typedef union {
379 uint8_t b[4];
380 uint16_t w[2];
381 uint32_t l;
382 float32 f;
383 } vis32;
384
385 void helper_fpmerge(void)
386 {
387 vis64 s, d;
388
389 s.d = DT0;
390 d.d = DT1;
391
392 // Reverse calculation order to handle overlap
393 d.VIS_B64(7) = s.VIS_B64(3);
394 d.VIS_B64(6) = d.VIS_B64(3);
395 d.VIS_B64(5) = s.VIS_B64(2);
396 d.VIS_B64(4) = d.VIS_B64(2);
397 d.VIS_B64(3) = s.VIS_B64(1);
398 d.VIS_B64(2) = d.VIS_B64(1);
399 d.VIS_B64(1) = s.VIS_B64(0);
400 //d.VIS_B64(0) = d.VIS_B64(0);
401
402 DT0 = d.d;
403 }
404
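/* Partitioned multiplies of the fmul8x16 family.  Each PMUL step forms the
   product of a 16-bit element and an 8-bit element, rounds away the low
   byte (adding 0x100 when the discarded byte exceeds 0x7f) and keeps the
   next 16 bits of the result. */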
405 void helper_fmul8x16(void)
406 {
407 vis64 s, d;
408 uint32_t tmp;
409
410 s.d = DT0;
411 d.d = DT1;
412
413 #define PMUL(r) \
414 tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
415 if ((tmp & 0xff) > 0x7f) \
416 tmp += 0x100; \
417 d.VIS_W64(r) = tmp >> 8;
418
419 PMUL(0);
420 PMUL(1);
421 PMUL(2);
422 PMUL(3);
423 #undef PMUL
424
425 DT0 = d.d;
426 }
427
428 void helper_fmul8x16al(void)
429 {
430 vis64 s, d;
431 uint32_t tmp;
432
433 s.d = DT0;
434 d.d = DT1;
435
436 #define PMUL(r) \
437 tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
438 if ((tmp & 0xff) > 0x7f) \
439 tmp += 0x100; \
440 d.VIS_W64(r) = tmp >> 8;
441
442 PMUL(0);
443 PMUL(1);
444 PMUL(2);
445 PMUL(3);
446 #undef PMUL
447
448 DT0 = d.d;
449 }
450
451 void helper_fmul8x16au(void)
452 {
453 vis64 s, d;
454 uint32_t tmp;
455
456 s.d = DT0;
457 d.d = DT1;
458
459 #define PMUL(r) \
460 tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
461 if ((tmp & 0xff) > 0x7f) \
462 tmp += 0x100; \
463 d.VIS_W64(r) = tmp >> 8;
464
465 PMUL(0);
466 PMUL(1);
467 PMUL(2);
468 PMUL(3);
469 #undef PMUL
470
471 DT0 = d.d;
472 }
473
474 void helper_fmul8sux16(void)
475 {
476 vis64 s, d;
477 uint32_t tmp;
478
479 s.d = DT0;
480 d.d = DT1;
481
482 #define PMUL(r) \
483 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
484 if ((tmp & 0xff) > 0x7f) \
485 tmp += 0x100; \
486 d.VIS_W64(r) = tmp >> 8;
487
488 PMUL(0);
489 PMUL(1);
490 PMUL(2);
491 PMUL(3);
492 #undef PMUL
493
494 DT0 = d.d;
495 }
496
497 void helper_fmul8ulx16(void)
498 {
499 vis64 s, d;
500 uint32_t tmp;
501
502 s.d = DT0;
503 d.d = DT1;
504
505 #define PMUL(r) \
506 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
507 if ((tmp & 0xff) > 0x7f) \
508 tmp += 0x100; \
509 d.VIS_W64(r) = tmp >> 8;
510
511 PMUL(0);
512 PMUL(1);
513 PMUL(2);
514 PMUL(3);
515 #undef PMUL
516
517 DT0 = d.d;
518 }
519
520 void helper_fmuld8sux16(void)
521 {
522 vis64 s, d;
523 uint32_t tmp;
524
525 s.d = DT0;
526 d.d = DT1;
527
528 #define PMUL(r) \
529 tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
530 if ((tmp & 0xff) > 0x7f) \
531 tmp += 0x100; \
532 d.VIS_L64(r) = tmp;
533
534 // Reverse calculation order to handle overlap
535 PMUL(1);
536 PMUL(0);
537 #undef PMUL
538
539 DT0 = d.d;
540 }
541
542 void helper_fmuld8ulx16(void)
543 {
544 vis64 s, d;
545 uint32_t tmp;
546
547 s.d = DT0;
548 d.d = DT1;
549
550 #define PMUL(r) \
551 tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
552 if ((tmp & 0xff) > 0x7f) \
553 tmp += 0x100; \
554 d.VIS_L64(r) = tmp;
555
556 // Reverse calculation order to handle overlap
557 PMUL(1);
558 PMUL(0);
559 #undef PMUL
560
561 DT0 = d.d;
562 }
563
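/* VIS fexpand: convert four unsigned 8-bit pixel values into four 16-bit
   fixed-point values by shifting each byte left by four. */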
564 void helper_fexpand(void)
565 {
566 vis32 s;
567 vis64 d;
568
569 s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
570 d.d = DT1;
571 d.VIS_W64(0) = s.VIS_B32(0) << 4;
572 d.VIS_W64(1) = s.VIS_B32(1) << 4;
573 d.VIS_W64(2) = s.VIS_B32(2) << 4;
574 d.VIS_W64(3) = s.VIS_B32(3) << 4;
575
576 DT0 = d.d;
577 }
578
579 #define VIS_HELPER(name, F) \
580 void name##16(void) \
581 { \
582 vis64 s, d; \
583 \
584 s.d = DT0; \
585 d.d = DT1; \
586 \
587 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
588 d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
589 d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
590 d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
591 \
592 DT0 = d.d; \
593 } \
594 \
595 void name##16s(void) \
596 { \
597 vis32 s, d; \
598 \
599 s.f = FT0; \
600 d.f = FT1; \
601 \
602 d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
603 d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
604 \
605 FT0 = d.f; \
606 } \
607 \
608 void name##32(void) \
609 { \
610 vis64 s, d; \
611 \
612 s.d = DT0; \
613 d.d = DT1; \
614 \
615 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
616 d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
617 \
618 DT0 = d.d; \
619 } \
620 \
621 void name##32s(void) \
622 { \
623 vis32 s, d; \
624 \
625 s.f = FT0; \
626 d.f = FT1; \
627 \
628 d.l = F(d.l, s.l); \
629 \
630 FT0 = d.f; \
631 }
632
633 #define FADD(a, b) ((a) + (b))
634 #define FSUB(a, b) ((a) - (b))
635 VIS_HELPER(helper_fpadd, FADD)
636 VIS_HELPER(helper_fpsub, FSUB)
637
638 #define VIS_CMPHELPER(name, F) \
639 void name##16(void) \
640 { \
641 vis64 s, d; \
642 \
643 s.d = DT0; \
644 d.d = DT1; \
645 \
646 d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
647 d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
648 d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
649 d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
650 \
651 DT0 = d.d; \
652 } \
653 \
654 void name##32(void) \
655 { \
656 vis64 s, d; \
657 \
658 s.d = DT0; \
659 d.d = DT1; \
660 \
661 d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
662 d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
663 \
664 DT0 = d.d; \
665 }
666
667 #define FCMPGT(a, b) ((a) > (b))
668 #define FCMPEQ(a, b) ((a) == (b))
669 #define FCMPLE(a, b) ((a) <= (b))
670 #define FCMPNE(a, b) ((a) != (b))
671
672 VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
673 VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
674 VIS_CMPHELPER(helper_fcmple, FCMPLE)
675 VIS_CMPHELPER(helper_fcmpne, FCMPNE)
676 #endif
677
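/* Fold the softfloat exception flags into FSR.cexc.  TEM occupies FSR bits
   27:23 and cexc bits 4:0, so shifting TEM right by 23 lines the two
   fields up: an enabled exception traps, a masked one is accumulated into
   aexc (bits 9:5). */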
678 void helper_check_ieee_exceptions(void)
679 {
680 target_ulong status;
681
682 status = get_float_exception_flags(&env->fp_status);
683 if (status) {
684 /* Copy IEEE 754 flags into FSR */
685 if (status & float_flag_invalid)
686 env->fsr |= FSR_NVC;
687 if (status & float_flag_overflow)
688 env->fsr |= FSR_OFC;
689 if (status & float_flag_underflow)
690 env->fsr |= FSR_UFC;
691 if (status & float_flag_divbyzero)
692 env->fsr |= FSR_DZC;
693 if (status & float_flag_inexact)
694 env->fsr |= FSR_NXC;
695
696 if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
697 /* Unmasked exception, generate a trap */
698 env->fsr |= FSR_FTT_IEEE_EXCP;
699 raise_exception(TT_FP_EXCP);
700 } else {
701 /* Accumulate exceptions */
702 env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
703 }
704 }
705 }
706
707 void helper_clear_float_exceptions(void)
708 {
709 set_float_exception_flags(0, &env->fp_status);
710 }
711
712 void helper_fabss(void)
713 {
714 FT0 = float32_abs(FT1);
715 }
716
717 #ifdef TARGET_SPARC64
718 void helper_fabsd(void)
719 {
720 DT0 = float64_abs(DT1);
721 }
722
723 void helper_fabsq(void)
724 {
725 QT0 = float128_abs(QT1);
726 }
727 #endif
728
729 void helper_fsqrts(void)
730 {
731 FT0 = float32_sqrt(FT1, &env->fp_status);
732 }
733
734 void helper_fsqrtd(void)
735 {
736 DT0 = float64_sqrt(DT1, &env->fp_status);
737 }
738
739 void helper_fsqrtq(void)
740 {
741 QT0 = float128_sqrt(QT1, &env->fp_status);
742 }
743
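/* Generate the fcmp and fcmpe comparison helpers.  FS is the shift applied
   to the fcc0 bit positions (FSR bits 11:10) to reach the target fcc field
   (0, 22, 24 or 26 for fcc0..fcc3); TRAP selects the fcmpe behaviour of
   trapping on unordered operands even when the NV exception is masked. */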
744 #define GEN_FCMP(name, size, reg1, reg2, FS, TRAP) \
745 void glue(helper_, name) (void) \
746 { \
747 target_ulong new_fsr; \
748 \
749 env->fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
750 switch (glue(size, _compare) (reg1, reg2, &env->fp_status)) { \
751 case float_relation_unordered: \
752 new_fsr = (FSR_FCC1 | FSR_FCC0) << FS; \
753 if ((env->fsr & FSR_NVM) || TRAP) { \
754 env->fsr |= new_fsr; \
755 env->fsr |= FSR_NVC; \
756 env->fsr |= FSR_FTT_IEEE_EXCP; \
757 raise_exception(TT_FP_EXCP); \
758 } else { \
759 env->fsr |= FSR_NVA; \
760 } \
761 break; \
762 case float_relation_less: \
763 new_fsr = FSR_FCC0 << FS; \
764 break; \
765 case float_relation_greater: \
766 new_fsr = FSR_FCC1 << FS; \
767 break; \
768 default: \
769 new_fsr = 0; \
770 break; \
771 } \
772 env->fsr |= new_fsr; \
773 }
774
775 GEN_FCMP(fcmps, float32, FT0, FT1, 0, 0);
776 GEN_FCMP(fcmpd, float64, DT0, DT1, 0, 0);
777
778 GEN_FCMP(fcmpes, float32, FT0, FT1, 0, 1);
779 GEN_FCMP(fcmped, float64, DT0, DT1, 0, 1);
780
781 GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
782 GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
783
784 #ifdef TARGET_SPARC64
785 GEN_FCMP(fcmps_fcc1, float32, FT0, FT1, 22, 0);
786 GEN_FCMP(fcmpd_fcc1, float64, DT0, DT1, 22, 0);
787 GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
788
789 GEN_FCMP(fcmps_fcc2, float32, FT0, FT1, 24, 0);
790 GEN_FCMP(fcmpd_fcc2, float64, DT0, DT1, 24, 0);
791 GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
792
793 GEN_FCMP(fcmps_fcc3, float32, FT0, FT1, 26, 0);
794 GEN_FCMP(fcmpd_fcc3, float64, DT0, DT1, 26, 0);
795 GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
796
797 GEN_FCMP(fcmpes_fcc1, float32, FT0, FT1, 22, 1);
798 GEN_FCMP(fcmped_fcc1, float64, DT0, DT1, 22, 1);
799 GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
800
801 GEN_FCMP(fcmpes_fcc2, float32, FT0, FT1, 24, 1);
802 GEN_FCMP(fcmped_fcc2, float64, DT0, DT1, 24, 1);
803 GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
804
805 GEN_FCMP(fcmpes_fcc3, float32, FT0, FT1, 26, 1);
806 GEN_FCMP(fcmped_fcc3, float64, DT0, DT1, 26, 1);
807 GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
808 #endif
809
810 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
811 defined(DEBUG_MXCC)
812 static void dump_mxcc(CPUState *env)
813 {
814 printf("mxccdata: %016llx %016llx %016llx %016llx\n",
815 env->mxccdata[0], env->mxccdata[1],
816 env->mxccdata[2], env->mxccdata[3]);
817 printf("mxccregs: %016llx %016llx %016llx %016llx\n"
818 " %016llx %016llx %016llx %016llx\n",
819 env->mxccregs[0], env->mxccregs[1],
820 env->mxccregs[2], env->mxccregs[3],
821 env->mxccregs[4], env->mxccregs[5],
822 env->mxccregs[6], env->mxccregs[7]);
823 }
824 #endif
825
826 #if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
827 && defined(DEBUG_ASI)
828 static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
829 uint64_t r1)
830 {
831 switch (size)
832 {
833 case 1:
834 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
835 addr, asi, r1 & 0xff);
836 break;
837 case 2:
838 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
839 addr, asi, r1 & 0xffff);
840 break;
841 case 4:
842 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
843 addr, asi, r1 & 0xffffffff);
844 break;
845 case 8:
846 DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
847 addr, asi, r1);
848 break;
849 }
850 }
851 #endif
852
853 #ifndef TARGET_SPARC64
854 #ifndef CONFIG_USER_ONLY
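/* SPARC32 (SuperSPARC/TurboSPARC flavoured) alternate-space loads: ASI 2
   selects the MXCC registers, ASIs 3 and 4 the reference MMU probe and
   register spaces, 0x9/0xa/0xb the code, user-data and supervisor-data
   spaces, and 0x20-0x2f the MMU-bypass window onto physical memory. */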
855 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
856 {
857 uint64_t ret = 0;
858 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
859 uint32_t last_addr = addr;
860 #endif
861
862 helper_check_align(addr, size - 1);
863 switch (asi) {
864 case 2: /* SuperSparc MXCC registers */
865 switch (addr) {
866 case 0x01c00a00: /* MXCC control register */
867 if (size == 8)
868 ret = env->mxccregs[3];
869 else
870 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
871 size);
872 break;
873 case 0x01c00a04: /* MXCC control register */
874 if (size == 4)
875 ret = env->mxccregs[3];
876 else
877 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
878 size);
879 break;
880 case 0x01c00c00: /* Module reset register */
881 if (size == 8) {
882 ret = env->mxccregs[5];
883 // should we do something here?
884 } else
885 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
886 size);
887 break;
888 case 0x01c00f00: /* MBus port address register */
889 if (size == 8)
890 ret = env->mxccregs[7];
891 else
892 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
893 size);
894 break;
895 default:
896 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
897 size);
898 break;
899 }
900 DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
901 "addr = %08x -> ret = %08x,"
902 "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
903 #ifdef DEBUG_MXCC
904 dump_mxcc(env);
905 #endif
906 break;
907 case 3: /* MMU probe */
908 {
909 int mmulev;
910
911 mmulev = (addr >> 8) & 15;
912 if (mmulev > 4)
913 ret = 0;
914 else
915 ret = mmu_probe(env, addr, mmulev);
916 DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
917 addr, mmulev, ret);
918 }
919 break;
920 case 4: /* read MMU regs */
921 {
922 int reg = (addr >> 8) & 0x1f;
923
924 ret = env->mmuregs[reg];
925 if (reg == 3) /* Fault status cleared on read */
926 env->mmuregs[3] = 0;
927 else if (reg == 0x13) /* Fault status read */
928 ret = env->mmuregs[3];
929 else if (reg == 0x14) /* Fault address read */
930 ret = env->mmuregs[4];
931 DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
932 }
933 break;
934 case 5: // Turbosparc ITLB Diagnostic
935 case 6: // Turbosparc DTLB Diagnostic
936 case 7: // Turbosparc IOTLB Diagnostic
937 break;
938 case 9: /* Supervisor code access */
939 switch(size) {
940 case 1:
941 ret = ldub_code(addr);
942 break;
943 case 2:
944 ret = lduw_code(addr);
945 break;
946 default:
947 case 4:
948 ret = ldl_code(addr);
949 break;
950 case 8:
951 ret = ldq_code(addr);
952 break;
953 }
954 break;
955 case 0xa: /* User data access */
956 switch(size) {
957 case 1:
958 ret = ldub_user(addr);
959 break;
960 case 2:
961 ret = lduw_user(addr);
962 break;
963 default:
964 case 4:
965 ret = ldl_user(addr);
966 break;
967 case 8:
968 ret = ldq_user(addr);
969 break;
970 }
971 break;
972 case 0xb: /* Supervisor data access */
973 switch(size) {
974 case 1:
975 ret = ldub_kernel(addr);
976 break;
977 case 2:
978 ret = lduw_kernel(addr);
979 break;
980 default:
981 case 4:
982 ret = ldl_kernel(addr);
983 break;
984 case 8:
985 ret = ldq_kernel(addr);
986 break;
987 }
988 break;
989 case 0xc: /* I-cache tag */
990 case 0xd: /* I-cache data */
991 case 0xe: /* D-cache tag */
992 case 0xf: /* D-cache data */
993 break;
994 case 0x20: /* MMU passthrough */
995 switch(size) {
996 case 1:
997 ret = ldub_phys(addr);
998 break;
999 case 2:
1000 ret = lduw_phys(addr);
1001 break;
1002 default:
1003 case 4:
1004 ret = ldl_phys(addr);
1005 break;
1006 case 8:
1007 ret = ldq_phys(addr);
1008 break;
1009 }
1010 break;
1011 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1012 switch(size) {
1013 case 1:
1014 ret = ldub_phys((target_phys_addr_t)addr
1015 | ((target_phys_addr_t)(asi & 0xf) << 32));
1016 break;
1017 case 2:
1018 ret = lduw_phys((target_phys_addr_t)addr
1019 | ((target_phys_addr_t)(asi & 0xf) << 32));
1020 break;
1021 default:
1022 case 4:
1023 ret = ldl_phys((target_phys_addr_t)addr
1024 | ((target_phys_addr_t)(asi & 0xf) << 32));
1025 break;
1026 case 8:
1027 ret = ldq_phys((target_phys_addr_t)addr
1028 | ((target_phys_addr_t)(asi & 0xf) << 32));
1029 break;
1030 }
1031 break;
1032 case 0x30: // Turbosparc secondary cache diagnostic
1033 case 0x31: // Turbosparc RAM snoop
1034 case 0x32: // Turbosparc page table descriptor diagnostic
1035 case 0x39: /* data cache diagnostic register */
1036 ret = 0;
1037 break;
1038 case 8: /* User code access, XXX */
1039 default:
1040 do_unassigned_access(addr, 0, 0, asi);
1041 ret = 0;
1042 break;
1043 }
1044 if (sign) {
1045 switch(size) {
1046 case 1:
1047 ret = (int8_t) ret;
1048 break;
1049 case 2:
1050 ret = (int16_t) ret;
1051 break;
1052 case 4:
1053 ret = (int32_t) ret;
1054 break;
1055 default:
1056 break;
1057 }
1058 }
1059 #ifdef DEBUG_ASI
1060 dump_asi("read ", last_addr, asi, size, ret);
1061 #endif
1062 return ret;
1063 }
1064
1065 void helper_st_asi(target_ulong addr, uint64_t val, int asi, int size)
1066 {
1067 helper_check_align(addr, size - 1);
1068 switch(asi) {
1069 case 2: /* SuperSparc MXCC registers */
1070 switch (addr) {
1071 case 0x01c00000: /* MXCC stream data register 0 */
1072 if (size == 8)
1073 env->mxccdata[0] = val;
1074 else
1075 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1076 size);
1077 break;
1078 case 0x01c00008: /* MXCC stream data register 1 */
1079 if (size == 8)
1080 env->mxccdata[1] = val;
1081 else
1082 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1083 size);
1084 break;
1085 case 0x01c00010: /* MXCC stream data register 2 */
1086 if (size == 8)
1087 env->mxccdata[2] = val;
1088 else
1089 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1090 size);
1091 break;
1092 case 0x01c00018: /* MXCC stream data register 3 */
1093 if (size == 8)
1094 env->mxccdata[3] = val;
1095 else
1096 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1097 size);
1098 break;
1099 case 0x01c00100: /* MXCC stream source */
1100 if (size == 8)
1101 env->mxccregs[0] = val;
1102 else
1103 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1104 size);
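// writing the stream source starts an MXCC stream copy: fetch 32 bytes
// from that physical address into the stream data registers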
1105 env->mxccdata[0] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1106 0);
1107 env->mxccdata[1] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1108 8);
1109 env->mxccdata[2] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1110 16);
1111 env->mxccdata[3] = ldq_phys((env->mxccregs[0] & 0xffffffffULL) +
1112 24);
1113 break;
1114 case 0x01c00200: /* MXCC stream destination */
1115 if (size == 8)
1116 env->mxccregs[1] = val;
1117 else
1118 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1119 size);
1120 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 0,
1121 env->mxccdata[0]);
1122 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 8,
1123 env->mxccdata[1]);
1124 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 16,
1125 env->mxccdata[2]);
1126 stq_phys((env->mxccregs[1] & 0xffffffffULL) + 24,
1127 env->mxccdata[3]);
1128 break;
1129 case 0x01c00a00: /* MXCC control register */
1130 if (size == 8)
1131 env->mxccregs[3] = val;
1132 else
1133 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1134 size);
1135 break;
1136 case 0x01c00a04: /* MXCC control register */
1137 if (size == 4)
1138 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
1139 | val;
1140 else
1141 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1142 size);
1143 break;
1144 case 0x01c00e00: /* MXCC error register */
1145 // writing a 1 bit clears the error
1146 if (size == 8)
1147 env->mxccregs[6] &= ~val;
1148 else
1149 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1150 size);
1151 break;
1152 case 0x01c00f00: /* MBus port address register */
1153 if (size == 8)
1154 env->mxccregs[7] = val;
1155 else
1156 DPRINTF_MXCC("%08x: unimplemented access size: %d\n", addr,
1157 size);
1158 break;
1159 default:
1160 DPRINTF_MXCC("%08x: unimplemented address, size: %d\n", addr,
1161 size);
1162 break;
1163 }
1164 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %08" PRIx64 "\n",
1165 asi, size, addr, val);
1166 #ifdef DEBUG_MXCC
1167 dump_mxcc(env);
1168 #endif
1169 break;
1170 case 3: /* MMU flush */
1171 {
1172 int mmulev;
1173
1174 mmulev = (addr >> 8) & 15;
1175 DPRINTF_MMU("mmu flush level %d\n", mmulev);
1176 switch (mmulev) {
1177 case 0: // flush page
1178 tlb_flush_page(env, addr & 0xfffff000);
1179 break;
1180 case 1: // flush segment (256k)
1181 case 2: // flush region (16M)
1182 case 3: // flush context (4G)
1183 case 4: // flush entire
1184 tlb_flush(env, 1);
1185 break;
1186 default:
1187 break;
1188 }
1189 #ifdef DEBUG_MMU
1190 dump_mmu(env);
1191 #endif
1192 }
1193 break;
1194 case 4: /* write MMU regs */
1195 {
1196 int reg = (addr >> 8) & 0x1f;
1197 uint32_t oldreg;
1198
1199 oldreg = env->mmuregs[reg];
1200 switch(reg) {
1201 case 0: // Control Register
1202 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
1203 (val & 0x00ffffff);
1204 // Mappings generated during no-fault mode or MMU
1205 // disabled mode are invalid in normal mode
1206 if ((oldreg & (MMU_E | MMU_NF | env->def->mmu_bm)) !=
1207 (env->mmuregs[reg] & (MMU_E | MMU_NF | env->def->mmu_bm)))
1208 tlb_flush(env, 1);
1209 break;
1210 case 1: // Context Table Pointer Register
1211 env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
1212 break;
1213 case 2: // Context Register
1214 env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
1215 if (oldreg != env->mmuregs[reg]) {
1216 /* we flush when the MMU context changes because
1217 QEMU has no MMU context support */
1218 tlb_flush(env, 1);
1219 }
1220 break;
1221 case 3: // Synchronous Fault Status Register with Clear
1222 case 4: // Synchronous Fault Address Register
1223 break;
1224 case 0x10: // TLB Replacement Control Register
1225 env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
1226 break;
1227 case 0x13: // Synchronous Fault Status Register with Read and Clear
1228 env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
1229 break;
1230 case 0x14: // Synchronous Fault Address Register
1231 env->mmuregs[4] = val;
1232 break;
1233 default:
1234 env->mmuregs[reg] = val;
1235 break;
1236 }
1237 if (oldreg != env->mmuregs[reg]) {
1238 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
1239 reg, oldreg, env->mmuregs[reg]);
1240 }
1241 #ifdef DEBUG_MMU
1242 dump_mmu(env);
1243 #endif
1244 }
1245 break;
1246 case 5: // Turbosparc ITLB Diagnostic
1247 case 6: // Turbosparc DTLB Diagnostic
1248 case 7: // Turbosparc IOTLB Diagnostic
1249 break;
1250 case 0xa: /* User data access */
1251 switch(size) {
1252 case 1:
1253 stb_user(addr, val);
1254 break;
1255 case 2:
1256 stw_user(addr, val);
1257 break;
1258 default:
1259 case 4:
1260 stl_user(addr, val);
1261 break;
1262 case 8:
1263 stq_user(addr, val);
1264 break;
1265 }
1266 break;
1267 case 0xb: /* Supervisor data access */
1268 switch(size) {
1269 case 1:
1270 stb_kernel(addr, val);
1271 break;
1272 case 2:
1273 stw_kernel(addr, val);
1274 break;
1275 default:
1276 case 4:
1277 stl_kernel(addr, val);
1278 break;
1279 case 8:
1280 stq_kernel(addr, val);
1281 break;
1282 }
1283 break;
1284 case 0xc: /* I-cache tag */
1285 case 0xd: /* I-cache data */
1286 case 0xe: /* D-cache tag */
1287 case 0xf: /* D-cache data */
1288 case 0x10: /* I/D-cache flush page */
1289 case 0x11: /* I/D-cache flush segment */
1290 case 0x12: /* I/D-cache flush region */
1291 case 0x13: /* I/D-cache flush context */
1292 case 0x14: /* I/D-cache flush user */
1293 break;
1294 case 0x17: /* Block copy, sta access */
1295 {
1296 // val = src
1297 // addr = dst
1298 // copy 32 bytes
1299 unsigned int i;
1300 uint32_t src = val & ~3, dst = addr & ~3, temp;
1301
1302 for (i = 0; i < 32; i += 4, src += 4, dst += 4) {
1303 temp = ldl_kernel(src);
1304 stl_kernel(dst, temp);
1305 }
1306 }
1307 break;
1308 case 0x1f: /* Block fill, stda access */
1309 {
1310 // addr = dst
1311 // fill 32 bytes with val
1312 unsigned int i;
1313 uint32_t dst = addr & ~7;
1314
1315 for (i = 0; i < 32; i += 8, dst += 8)
1316 stq_kernel(dst, val);
1317 }
1318 break;
1319 case 0x20: /* MMU passthrough */
1320 {
1321 switch(size) {
1322 case 1:
1323 stb_phys(addr, val);
1324 break;
1325 case 2:
1326 stw_phys(addr, val);
1327 break;
1328 case 4:
1329 default:
1330 stl_phys(addr, val);
1331 break;
1332 case 8:
1333 stq_phys(addr, val);
1334 break;
1335 }
1336 }
1337 break;
1338 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
1339 {
1340 switch(size) {
1341 case 1:
1342 stb_phys((target_phys_addr_t)addr
1343 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1344 break;
1345 case 2:
1346 stw_phys((target_phys_addr_t)addr
1347 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1348 break;
1349 case 4:
1350 default:
1351 stl_phys((target_phys_addr_t)addr
1352 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1353 break;
1354 case 8:
1355 stq_phys((target_phys_addr_t)addr
1356 | ((target_phys_addr_t)(asi & 0xf) << 32), val);
1357 break;
1358 }
1359 }
1360 break;
1361 case 0x30: // store buffer tags or Turbosparc secondary cache diagnostic
1362 case 0x31: // store buffer data, Ross RT620 I-cache flush or
1363 // Turbosparc snoop RAM
1364 case 0x32: // store buffer control or Turbosparc page table
1365 // descriptor diagnostic
1366 case 0x36: /* I-cache flash clear */
1367 case 0x37: /* D-cache flash clear */
1368 case 0x38: /* breakpoint diagnostics */
1369 case 0x4c: /* breakpoint action */
1370 break;
1371 case 8: /* User code access, XXX */
1372 case 9: /* Supervisor code access, XXX */
1373 default:
1374 do_unassigned_access(addr, 1, 0, asi);
1375 break;
1376 }
1377 #ifdef DEBUG_ASI
1378 dump_asi("write", addr, asi, size, val);
1379 #endif
1380 }
1381
1382 #endif /* CONFIG_USER_ONLY */
1383 #else /* TARGET_SPARC64 */
1384
1385 #ifdef CONFIG_USER_ONLY
1386 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1387 {
1388 uint64_t ret = 0;
1389 #if defined(DEBUG_ASI)
1390 target_ulong last_addr = addr;
1391 #endif
1392
1393 if (asi < 0x80)
1394 raise_exception(TT_PRIV_ACT);
1395
1396 helper_check_align(addr, size - 1);
1397 address_mask(env, &addr);
1398
1399 switch (asi) {
1400 case 0x80: // Primary
1401 case 0x82: // Primary no-fault
1402 case 0x88: // Primary LE
1403 case 0x8a: // Primary no-fault LE
1404 {
1405 switch(size) {
1406 case 1:
1407 ret = ldub_raw(addr);
1408 break;
1409 case 2:
1410 ret = lduw_raw(addr);
1411 break;
1412 case 4:
1413 ret = ldl_raw(addr);
1414 break;
1415 default:
1416 case 8:
1417 ret = ldq_raw(addr);
1418 break;
1419 }
1420 }
1421 break;
1422 case 0x81: // Secondary
1423 case 0x83: // Secondary no-fault
1424 case 0x89: // Secondary LE
1425 case 0x8b: // Secondary no-fault LE
1426 // XXX
1427 break;
1428 default:
1429 break;
1430 }
1431
1432 /* Convert from little endian */
1433 switch (asi) {
1434 case 0x88: // Primary LE
1435 case 0x89: // Secondary LE
1436 case 0x8a: // Primary no-fault LE
1437 case 0x8b: // Secondary no-fault LE
1438 switch(size) {
1439 case 2:
1440 ret = bswap16(ret);
1441 break;
1442 case 4:
1443 ret = bswap32(ret);
1444 break;
1445 case 8:
1446 ret = bswap64(ret);
1447 break;
1448 default:
1449 break;
1450 }
1451 default:
1452 break;
1453 }
1454
1455 /* Convert to signed number */
1456 if (sign) {
1457 switch(size) {
1458 case 1:
1459 ret = (int8_t) ret;
1460 break;
1461 case 2:
1462 ret = (int16_t) ret;
1463 break;
1464 case 4:
1465 ret = (int32_t) ret;
1466 break;
1467 default:
1468 break;
1469 }
1470 }
1471 #ifdef DEBUG_ASI
1472 dump_asi("read ", last_addr, asi, size, ret);
1473 #endif
1474 return ret;
1475 }
1476
1477 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1478 {
1479 #ifdef DEBUG_ASI
1480 dump_asi("write", addr, asi, size, val);
1481 #endif
1482 if (asi < 0x80)
1483 raise_exception(TT_PRIV_ACT);
1484
1485 helper_check_align(addr, size - 1);
1486 address_mask(env, &addr);
1487
1488 /* Convert to little endian */
1489 switch (asi) {
1490 case 0x88: // Primary LE
1491 case 0x89: // Secondary LE
1492 switch(size) {
1493 case 2:
1494 addr = bswap16(addr);
1495 break;
1496 case 4:
1497 addr = bswap32(addr);
1498 break;
1499 case 8:
1500 addr = bswap64(addr);
1501 break;
1502 default:
1503 break;
1504 }
1505 default:
1506 break;
1507 }
1508
1509 switch(asi) {
1510 case 0x80: // Primary
1511 case 0x88: // Primary LE
1512 {
1513 switch(size) {
1514 case 1:
1515 stb_raw(addr, val);
1516 break;
1517 case 2:
1518 stw_raw(addr, val);
1519 break;
1520 case 4:
1521 stl_raw(addr, val);
1522 break;
1523 case 8:
1524 default:
1525 stq_raw(addr, val);
1526 break;
1527 }
1528 }
1529 break;
1530 case 0x81: // Secondary
1531 case 0x89: // Secondary LE
1532 // XXX
1533 return;
1534
1535 case 0x82: // Primary no-fault, RO
1536 case 0x83: // Secondary no-fault, RO
1537 case 0x8a: // Primary no-fault LE, RO
1538 case 0x8b: // Secondary no-fault LE, RO
1539 default:
1540 do_unassigned_access(addr, 1, 0, 1);
1541 return;
1542 }
1543 }
1544
1545 #else /* CONFIG_USER_ONLY */
1546
1547 uint64_t helper_ld_asi(target_ulong addr, int asi, int size, int sign)
1548 {
1549 uint64_t ret = 0;
1550 #if defined(DEBUG_ASI)
1551 target_ulong last_addr = addr;
1552 #endif
1553
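/* Restricted ASIs (< 0x80) need privileged mode; with the hypervisor
   feature, ASIs 0x30-0x7f additionally require hyperprivileged mode. */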
1554 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1555 || ((env->def->features & CPU_FEATURE_HYPV)
1556 && asi >= 0x30 && asi < 0x80
1557 && !(env->hpstate & HS_PRIV)))
1558 raise_exception(TT_PRIV_ACT);
1559
1560 helper_check_align(addr, size - 1);
1561 switch (asi) {
1562 case 0x10: // As if user primary
1563 case 0x18: // As if user primary LE
1564 case 0x80: // Primary
1565 case 0x82: // Primary no-fault
1566 case 0x88: // Primary LE
1567 case 0x8a: // Primary no-fault LE
1568 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1569 if ((env->def->features & CPU_FEATURE_HYPV)
1570 && env->hpstate & HS_PRIV) {
1571 switch(size) {
1572 case 1:
1573 ret = ldub_hypv(addr);
1574 break;
1575 case 2:
1576 ret = lduw_hypv(addr);
1577 break;
1578 case 4:
1579 ret = ldl_hypv(addr);
1580 break;
1581 default:
1582 case 8:
1583 ret = ldq_hypv(addr);
1584 break;
1585 }
1586 } else {
1587 switch(size) {
1588 case 1:
1589 ret = ldub_kernel(addr);
1590 break;
1591 case 2:
1592 ret = lduw_kernel(addr);
1593 break;
1594 case 4:
1595 ret = ldl_kernel(addr);
1596 break;
1597 default:
1598 case 8:
1599 ret = ldq_kernel(addr);
1600 break;
1601 }
1602 }
1603 } else {
1604 switch(size) {
1605 case 1:
1606 ret = ldub_user(addr);
1607 break;
1608 case 2:
1609 ret = lduw_user(addr);
1610 break;
1611 case 4:
1612 ret = ldl_user(addr);
1613 break;
1614 default:
1615 case 8:
1616 ret = ldq_user(addr);
1617 break;
1618 }
1619 }
1620 break;
1621 case 0x14: // Bypass
1622 case 0x15: // Bypass, non-cacheable
1623 case 0x1c: // Bypass LE
1624 case 0x1d: // Bypass, non-cacheable LE
1625 {
1626 switch(size) {
1627 case 1:
1628 ret = ldub_phys(addr);
1629 break;
1630 case 2:
1631 ret = lduw_phys(addr);
1632 break;
1633 case 4:
1634 ret = ldl_phys(addr);
1635 break;
1636 default:
1637 case 8:
1638 ret = ldq_phys(addr);
1639 break;
1640 }
1641 break;
1642 }
1643 case 0x24: // Nucleus quad LDD 128 bit atomic
1644 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1645 // Only ldda allowed
1646 raise_exception(TT_ILL_INSN);
1647 return 0;
1648 case 0x04: // Nucleus
1649 case 0x0c: // Nucleus Little Endian (LE)
1650 case 0x11: // As if user secondary
1651 case 0x19: // As if user secondary LE
1652 case 0x4a: // UPA config
1653 case 0x81: // Secondary
1654 case 0x83: // Secondary no-fault
1655 case 0x89: // Secondary LE
1656 case 0x8b: // Secondary no-fault LE
1657 // XXX
1658 break;
1659 case 0x45: // LSU
1660 ret = env->lsu;
1661 break;
1662 case 0x50: // I-MMU regs
1663 {
1664 int reg = (addr >> 3) & 0xf;
1665
1666 ret = env->immuregs[reg];
1667 break;
1668 }
1669 case 0x51: // I-MMU 8k TSB pointer
1670 case 0x52: // I-MMU 64k TSB pointer
1671 // XXX
1672 break;
1673 case 0x55: // I-MMU data access
1674 {
1675 int reg = (addr >> 3) & 0x3f;
1676
1677 ret = env->itlb_tte[reg];
1678 break;
1679 }
1680 case 0x56: // I-MMU tag read
1681 {
1682 int reg = (addr >> 3) & 0x3f;
1683
1684 ret = env->itlb_tag[reg];
1685 break;
1686 }
1687 case 0x58: // D-MMU regs
1688 {
1689 int reg = (addr >> 3) & 0xf;
1690
1691 ret = env->dmmuregs[reg];
1692 break;
1693 }
1694 case 0x5d: // D-MMU data access
1695 {
1696 int reg = (addr >> 3) & 0x3f;
1697
1698 ret = env->dtlb_tte[reg];
1699 break;
1700 }
1701 case 0x5e: // D-MMU tag read
1702 {
1703 int reg = (addr >> 3) & 0x3f;
1704
1705 ret = env->dtlb_tag[reg];
1706 break;
1707 }
1708 case 0x46: // D-cache data
1709 case 0x47: // D-cache tag access
1710 case 0x4b: // E-cache error enable
1711 case 0x4c: // E-cache asynchronous fault status
1712 case 0x4d: // E-cache asynchronous fault address
1713 case 0x4e: // E-cache tag data
1714 case 0x66: // I-cache instruction access
1715 case 0x67: // I-cache tag access
1716 case 0x6e: // I-cache predecode
1717 case 0x6f: // I-cache LRU etc.
1718 case 0x76: // E-cache tag
1719 case 0x7e: // E-cache tag
1720 break;
1721 case 0x59: // D-MMU 8k TSB pointer
1722 case 0x5a: // D-MMU 64k TSB pointer
1723 case 0x5b: // D-MMU data pointer
1724 case 0x48: // Interrupt dispatch, RO
1725 case 0x49: // Interrupt data receive
1726 case 0x7f: // Incoming interrupt vector, RO
1727 // XXX
1728 break;
1729 case 0x54: // I-MMU data in, WO
1730 case 0x57: // I-MMU demap, WO
1731 case 0x5c: // D-MMU data in, WO
1732 case 0x5f: // D-MMU demap, WO
1733 case 0x77: // Interrupt vector, WO
1734 default:
1735 do_unassigned_access(addr, 0, 0, 1);
1736 ret = 0;
1737 break;
1738 }
1739
1740 /* Convert from little endian */
1741 switch (asi) {
1742 case 0x0c: // Nucleus Little Endian (LE)
1743 case 0x18: // As if user primary LE
1744 case 0x19: // As if user secondary LE
1745 case 0x1c: // Bypass LE
1746 case 0x1d: // Bypass, non-cacheable LE
1747 case 0x88: // Primary LE
1748 case 0x89: // Secondary LE
1749 case 0x8a: // Primary no-fault LE
1750 case 0x8b: // Secondary no-fault LE
1751 switch(size) {
1752 case 2:
1753 ret = bswap16(ret);
1754 break;
1755 case 4:
1756 ret = bswap32(ret);
1757 break;
1758 case 8:
1759 ret = bswap64(ret);
1760 break;
1761 default:
1762 break;
1763 }
1764 default:
1765 break;
1766 }
1767
1768 /* Convert to signed number */
1769 if (sign) {
1770 switch(size) {
1771 case 1:
1772 ret = (int8_t) ret;
1773 break;
1774 case 2:
1775 ret = (int16_t) ret;
1776 break;
1777 case 4:
1778 ret = (int32_t) ret;
1779 break;
1780 default:
1781 break;
1782 }
1783 }
1784 #ifdef DEBUG_ASI
1785 dump_asi("read ", last_addr, asi, size, ret);
1786 #endif
1787 return ret;
1788 }
1789
1790 void helper_st_asi(target_ulong addr, target_ulong val, int asi, int size)
1791 {
1792 #ifdef DEBUG_ASI
1793 dump_asi("write", addr, asi, size, val);
1794 #endif
1795 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
1796 || ((env->def->features & CPU_FEATURE_HYPV)
1797 && asi >= 0x30 && asi < 0x80
1798 && !(env->hpstate & HS_PRIV)))
1799 raise_exception(TT_PRIV_ACT);
1800
1801 helper_check_align(addr, size - 1);
1802 /* Convert to little endian */
1803 switch (asi) {
1804 case 0x0c: // Nucleus Little Endian (LE)
1805 case 0x18: // As if user primary LE
1806 case 0x19: // As if user secondary LE
1807 case 0x1c: // Bypass LE
1808 case 0x1d: // Bypass, non-cacheable LE
1809 case 0x88: // Primary LE
1810 case 0x89: // Secondary LE
1811 switch(size) {
1812 case 2:
1813 addr = bswap16(addr);
1814 break;
1815 case 4:
1816 addr = bswap32(addr);
1817 break;
1818 case 8:
1819 addr = bswap64(addr);
1820 break;
1821 default:
1822 break;
1823 }
1824 default:
1825 break;
1826 }
1827
1828 switch(asi) {
1829 case 0x10: // As if user primary
1830 case 0x18: // As if user primary LE
1831 case 0x80: // Primary
1832 case 0x88: // Primary LE
1833 if ((asi & 0x80) && (env->pstate & PS_PRIV)) {
1834 if ((env->def->features & CPU_FEATURE_HYPV)
1835 && env->hpstate & HS_PRIV) {
1836 switch(size) {
1837 case 1:
1838 stb_hypv(addr, val);
1839 break;
1840 case 2:
1841 stw_hypv(addr, val);
1842 break;
1843 case 4:
1844 stl_hypv(addr, val);
1845 break;
1846 case 8:
1847 default:
1848 stq_hypv(addr, val);
1849 break;
1850 }
1851 } else {
1852 switch(size) {
1853 case 1:
1854 stb_kernel(addr, val);
1855 break;
1856 case 2:
1857 stw_kernel(addr, val);
1858 break;
1859 case 4:
1860 stl_kernel(addr, val);
1861 break;
1862 case 8:
1863 default:
1864 stq_kernel(addr, val);
1865 break;
1866 }
1867 }
1868 } else {
1869 switch(size) {
1870 case 1:
1871 stb_user(addr, val);
1872 break;
1873 case 2:
1874 stw_user(addr, val);
1875 break;
1876 case 4:
1877 stl_user(addr, val);
1878 break;
1879 case 8:
1880 default:
1881 stq_user(addr, val);
1882 break;
1883 }
1884 }
1885 break;
1886 case 0x14: // Bypass
1887 case 0x15: // Bypass, non-cacheable
1888 case 0x1c: // Bypass LE
1889 case 0x1d: // Bypass, non-cacheable LE
1890 {
1891 switch(size) {
1892 case 1:
1893 stb_phys(addr, val);
1894 break;
1895 case 2:
1896 stw_phys(addr, val);
1897 break;
1898 case 4:
1899 stl_phys(addr, val);
1900 break;
1901 case 8:
1902 default:
1903 stq_phys(addr, val);
1904 break;
1905 }
1906 }
1907 return;
1908 case 0x24: // Nucleus quad LDD 128 bit atomic
1909 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
1910 // Only ldda allowed
1911 raise_exception(TT_ILL_INSN);
1912 return;
1913 case 0x04: // Nucleus
1914 case 0x0c: // Nucleus Little Endian (LE)
1915 case 0x11: // As if user secondary
1916 case 0x19: // As if user secondary LE
1917 case 0x4a: // UPA config
1918 case 0x81: // Secondary
1919 case 0x89: // Secondary LE
1920 // XXX
1921 return;
1922 case 0x45: // LSU
1923 {
1924 uint64_t oldreg;
1925
1926 oldreg = env->lsu;
1927 env->lsu = val & (DMMU_E | IMMU_E);
1928 // Mappings generated during D/I MMU disabled mode are
1929 // invalid in normal mode
1930 if (oldreg != env->lsu) {
1931 DPRINTF_MMU("LSU change: 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
1932 oldreg, env->lsu);
1933 #ifdef DEBUG_MMU
1934 dump_mmu(env);
1935 #endif
1936 tlb_flush(env, 1);
1937 }
1938 return;
1939 }
1940 case 0x50: // I-MMU regs
1941 {
1942 int reg = (addr >> 3) & 0xf;
1943 uint64_t oldreg;
1944
1945 oldreg = env->immuregs[reg];
1946 switch(reg) {
1947 case 0: // RO
1948 case 4:
1949 return;
1950 case 1: // Not in I-MMU
1951 case 2:
1952 case 7:
1953 case 8:
1954 return;
1955 case 3: // SFSR
1956 if ((val & 1) == 0)
1957 val = 0; // Clear SFSR
1958 break;
1959 case 5: // TSB access
1960 case 6: // Tag access
1961 default:
1962 break;
1963 }
1964 env->immuregs[reg] = val;
1965 if (oldreg != env->immuregs[reg]) {
1966 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
1967 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1968 }
1969 #ifdef DEBUG_MMU
1970 dump_mmu(env);
1971 #endif
1972 return;
1973 }
1974 case 0x54: // I-MMU data in
1975 {
1976 unsigned int i;
1977
1978 // Try finding an invalid entry
1979 for (i = 0; i < 64; i++) {
1980 if ((env->itlb_tte[i] & 0x8000000000000000ULL) == 0) {
1981 env->itlb_tag[i] = env->immuregs[6];
1982 env->itlb_tte[i] = val;
1983 return;
1984 }
1985 }
1986 // Try finding an unlocked entry
1987 for (i = 0; i < 64; i++) {
1988 if ((env->itlb_tte[i] & 0x40) == 0) {
1989 env->itlb_tag[i] = env->immuregs[6];
1990 env->itlb_tte[i] = val;
1991 return;
1992 }
1993 }
1994 // error state?
1995 return;
1996 }
1997 case 0x55: // I-MMU data access
1998 {
1999 unsigned int i = (addr >> 3) & 0x3f;
2000
2001 env->itlb_tag[i] = env->immuregs[6];
2002 env->itlb_tte[i] = val;
2003 return;
2004 }
2005 case 0x57: // I-MMU demap
2006 // XXX
2007 return;
2008 case 0x58: // D-MMU regs
2009 {
2010 int reg = (addr >> 3) & 0xf;
2011 uint64_t oldreg;
2012
2013 oldreg = env->dmmuregs[reg];
2014 switch(reg) {
2015 case 0: // RO
2016 case 4:
2017 return;
2018 case 3: // SFSR
2019 if ((val & 1) == 0) {
2020 val = 0; // Clear SFSR, Fault address
2021 env->dmmuregs[4] = 0;
2022 }
2023 env->dmmuregs[reg] = val;
2024 break;
2025 case 1: // Primary context
2026 case 2: // Secondary context
2027 case 5: // TSB access
2028 case 6: // Tag access
2029 case 7: // Virtual Watchpoint
2030 case 8: // Physical Watchpoint
2031 default:
2032 break;
2033 }
2034 env->dmmuregs[reg] = val;
2035 if (oldreg != env->dmmuregs[reg]) {
2036 DPRINTF_MMU("mmu change reg[%d]: 0x%08" PRIx64 " -> 0x%08"
2037 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
2038 }
2039 #ifdef DEBUG_MMU
2040 dump_mmu(env);
2041 #endif
2042 return;
2043 }
2044 case 0x5c: // D-MMU data in
2045 {
2046 unsigned int i;
2047
2048 // Try finding an invalid entry
2049 for (i = 0; i < 64; i++) {
2050 if ((env->dtlb_tte[i] & 0x8000000000000000ULL) == 0) {
2051 env->dtlb_tag[i] = env->dmmuregs[6];
2052 env->dtlb_tte[i] = val;
2053 return;
2054 }
2055 }
2056 // Try finding an unlocked entry
2057 for (i = 0; i < 64; i++) {
2058 if ((env->dtlb_tte[i] & 0x40) == 0) {
2059 env->dtlb_tag[i] = env->dmmuregs[6];
2060 env->dtlb_tte[i] = val;
2061 return;
2062 }
2063 }
2064 // error state?
2065 return;
2066 }
2067 case 0x5d: // D-MMU data access
2068 {
2069 unsigned int i = (addr >> 3) & 0x3f;
2070
2071 env->dtlb_tag[i] = env->dmmuregs[6];
2072 env->dtlb_tte[i] = val;
2073 return;
2074 }
2075 case 0x5f: // D-MMU demap
2076 case 0x49: // Interrupt data receive
2077 // XXX
2078 return;
2079 case 0x46: // D-cache data
2080 case 0x47: // D-cache tag access
2081 case 0x4b: // E-cache error enable
2082 case 0x4c: // E-cache asynchronous fault status
2083 case 0x4d: // E-cache asynchronous fault address
2084 case 0x4e: // E-cache tag data
2085 case 0x66: // I-cache instruction access
2086 case 0x67: // I-cache tag access
2087 case 0x6e: // I-cache predecode
2088 case 0x6f: // I-cache LRU etc.
2089 case 0x76: // E-cache tag
2090 case 0x7e: // E-cache tag
2091 return;
2092 case 0x51: // I-MMU 8k TSB pointer, RO
2093 case 0x52: // I-MMU 64k TSB pointer, RO
2094 case 0x56: // I-MMU tag read, RO
2095 case 0x59: // D-MMU 8k TSB pointer, RO
2096 case 0x5a: // D-MMU 64k TSB pointer, RO
2097 case 0x5b: // D-MMU data pointer, RO
2098 case 0x5e: // D-MMU tag read, RO
2099 case 0x48: // Interrupt dispatch, RO
2100 case 0x7f: // Incoming interrupt vector, RO
2101 case 0x82: // Primary no-fault, RO
2102 case 0x83: // Secondary no-fault, RO
2103 case 0x8a: // Primary no-fault LE, RO
2104 case 0x8b: // Secondary no-fault LE, RO
2105 default:
2106 do_unassigned_access(addr, 1, 0, 1);
2107 return;
2108 }
2109 }
2110 #endif /* CONFIG_USER_ONLY */
2111
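/* ldda from an alternate space: ASIs 0x24/0x2c perform the 128-bit atomic
   quad load into a register pair; every other ASI is split into two 32-bit
   loads through helper_ld_asi. */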
2112 void helper_ldda_asi(target_ulong addr, int asi, int rd)
2113 {
2114 if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
2115 || ((env->def->features & CPU_FEATURE_HYPV)
2116 && asi >= 0x30 && asi < 0x80
2117 && !(env->hpstate & HS_PRIV)))
2118 raise_exception(TT_PRIV_ACT);
2119
2120 switch (asi) {
2121 case 0x24: // Nucleus quad LDD 128 bit atomic
2122 case 0x2c: // Nucleus quad LDD 128 bit atomic LE
2123 helper_check_align(addr, 0xf);
2124 if (rd == 0) {
2125 env->gregs[1] = ldq_kernel(addr + 8);
2126 if (asi == 0x2c)
2127 bswap64s(&env->gregs[1]);
2128 } else if (rd < 8) {
2129 env->gregs[rd] = ldq_kernel(addr);
2130 env->gregs[rd + 1] = ldq_kernel(addr + 8);
2131 if (asi == 0x2c) {
2132 bswap64s(&env->gregs[rd]);
2133 bswap64s(&env->gregs[rd + 1]);
2134 }
2135 } else {
2136 env->regwptr[rd] = ldq_kernel(addr);
2137 env->regwptr[rd + 1] = ldq_kernel(addr + 8);
2138 if (asi == 0x2c) {
2139 bswap64s(&env->regwptr[rd]);
2140 bswap64s(&env->regwptr[rd + 1]);
2141 }
2142 }
2143 break;
2144 default:
2145 helper_check_align(addr, 0x3);
2146 if (rd == 0)
2147 env->gregs[1] = helper_ld_asi(addr + 4, asi, 4, 0);
2148 else if (rd < 8) {
2149 env->gregs[rd] = helper_ld_asi(addr, asi, 4, 0);
2150 env->gregs[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2151 } else {
2152 env->regwptr[rd] = helper_ld_asi(addr, asi, 4, 0);
2153 env->regwptr[rd + 1] = helper_ld_asi(addr + 4, asi, 4, 0);
2154 }
2155 break;
2156 }
2157 }
2158
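/* Floating-point loads from an alternate space.  The block-transfer ASIs
   (0xf0/0xf1/0xf8/0xf9) move 64 bytes through sixteen consecutive
   single-precision registers and require a 64-byte aligned address and a
   register number that is a multiple of eight. */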
2159 void helper_ldf_asi(target_ulong addr, int asi, int size, int rd)
2160 {
2161 unsigned int i;
2162 target_ulong val;
2163
2164 helper_check_align(addr, 3);
2165 switch (asi) {
2166 case 0xf0: // Block load primary
2167 case 0xf1: // Block load secondary
2168 case 0xf8: // Block load primary LE
2169 case 0xf9: // Block load secondary LE
2170 if (rd & 7) {
2171 raise_exception(TT_ILL_INSN);
2172 return;
2173 }
2174 helper_check_align(addr, 0x3f);
2175 for (i = 0; i < 16; i++) {
2176 *(uint32_t *)&env->fpr[rd++] = helper_ld_asi(addr, asi & 0x8f, 4,
2177 0);
2178 addr += 4;
2179 }
2180
2181 return;
2182 default:
2183 break;
2184 }
2185
2186 val = helper_ld_asi(addr, asi, size, 0);
2187 switch(size) {
2188 default:
2189 case 4:
2190 *((uint32_t *)&FT0) = val;
2191 break;
2192 case 8:
2193 *((int64_t *)&DT0) = val;
2194 break;
2195 case 16:
2196 // XXX
2197 break;
2198 }
2199 }
2200
2201 void helper_stf_asi(target_ulong addr, int asi, int size, int rd)
2202 {
2203 unsigned int i;
2204 target_ulong val = 0;
2205
2206 helper_check_align(addr, 3);
2207 switch (asi) {
2208 case 0xf0: // Block store primary
2209 case 0xf1: // Block store secondary
2210 case 0xf8: // Block store primary LE
2211 case 0xf9: // Block store secondary LE
2212 if (rd & 7) {
2213 raise_exception(TT_ILL_INSN);
2214 return;
2215 }
2216 helper_check_align(addr, 0x3f);
2217 for (i = 0; i < 16; i++) {
2218 val = *(uint32_t *)&env->fpr[rd++];
2219 helper_st_asi(addr, val, asi & 0x8f, 4);
2220 addr += 4;
2221 }
2222
2223 return;
2224 default:
2225 break;
2226 }
2227
2228 switch(size) {
2229 default:
2230 case 4:
2231 val = *((uint32_t *)&FT0);
2232 break;
2233 case 8:
2234 val = *((int64_t *)&DT0);
2235 break;
2236 case 16:
2237 // XXX
2238 break;
2239 }
2240 helper_st_asi(addr, val, asi, size);
2241 }
2242
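/* casa/casxa: compare the value at [addr] in the given ASI with val1 and,
   if they match, store val2 there; the old memory value is returned in
   either case. */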
2243 target_ulong helper_cas_asi(target_ulong addr, target_ulong val1,
2244 target_ulong val2, uint32_t asi)
2245 {
2246 target_ulong ret;
2247
2248 val1 &= 0xffffffffUL;
2249 ret = helper_ld_asi(addr, asi, 4, 0);
2250 ret &= 0xffffffffUL;
2251 if (val1 == ret)
2252 helper_st_asi(addr, val2 & 0xffffffffUL, asi, 4);
2253 return ret;
2254 }
2255
2256 target_ulong helper_casx_asi(target_ulong addr, target_ulong val1,
2257 target_ulong val2, uint32_t asi)
2258 {
2259 target_ulong ret;
2260
2261 ret = helper_ld_asi(addr, asi, 8, 0);
2262 if (val1 == ret)
2263 helper_st_asi(addr, val2, asi, 8);
2264 return ret;
2265 }
2266 #endif /* TARGET_SPARC64 */
2267
2268 #ifndef TARGET_SPARC64
2269 void helper_rett(void)
2270 {
2271 unsigned int cwp;
2272
2273 if (env->psret == 1)
2274 raise_exception(TT_ILL_INSN);
2275
2276 env->psret = 1;
2277 cwp = cpu_cwp_inc(env, env->cwp + 1) ;
2278 if (env->wim & (1 << cwp)) {
2279 raise_exception(TT_WIN_UNF);
2280 }
2281 set_cwp(cwp);
2282 env->psrs = env->psrps;
2283 }
2284 #endif
2285
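/* udiv/sdiv: divide the 64-bit value formed from Y (high word) and rs1 by
   the 32-bit rs2.  Results that do not fit in 32 bits saturate, and the
   overflow condition is recorded in cc_src2 for the cc-setting variants. */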
2286 target_ulong helper_udiv(target_ulong a, target_ulong b)
2287 {
2288 uint64_t x0;
2289 uint32_t x1;
2290
2291 x0 = a | ((uint64_t) (env->y) << 32);
2292 x1 = b;
2293
2294 if (x1 == 0) {
2295 raise_exception(TT_DIV_ZERO);
2296 }
2297
2298 x0 = x0 / x1;
2299 if (x0 > 0xffffffff) {
2300 env->cc_src2 = 1;
2301 return 0xffffffff;
2302 } else {
2303 env->cc_src2 = 0;
2304 return x0;
2305 }
2306 }
2307
2308 target_ulong helper_sdiv(target_ulong a, target_ulong b)
2309 {
2310 int64_t x0;
2311 int32_t x1;
2312
2313 x0 = a | ((int64_t) (env->y) << 32);
2314 x1 = b;
2315
2316 if (x1 == 0) {
2317 raise_exception(TT_DIV_ZERO);
2318 }
2319
2320 x0 = x0 / x1;
2321 if ((int32_t) x0 != x0) {
2322 env->cc_src2 = 1;
2323 return x0 < 0? 0x80000000: 0x7fffffff;
2324 } else {
2325 env->cc_src2 = 0;
2326 return x0;
2327 }
2328 }
2329
2330 uint64_t helper_pack64(target_ulong high, target_ulong low)
2331 {
2332 return ((uint64_t)high << 32) | (uint64_t)(low & 0xffffffff);
2333 }
2334
2335 void helper_stdf(target_ulong addr, int mem_idx)
2336 {
2337 helper_check_align(addr, 7);
2338 #if !defined(CONFIG_USER_ONLY)
2339 switch (mem_idx) {
2340 case 0:
2341 stfq_user(addr, DT0);
2342 break;
2343 case 1:
2344 stfq_kernel(addr, DT0);
2345 break;
2346 #ifdef TARGET_SPARC64
2347 case 2:
2348 stfq_hypv(addr, DT0);
2349 break;
2350 #endif
2351 default:
2352 break;
2353 }
2354 #else
2355 address_mask(env, &addr);
2356 stfq_raw(addr, DT0);
2357 #endif
2358 }
2359
2360 void helper_lddf(target_ulong addr, int mem_idx)
2361 {
2362 helper_check_align(addr, 7);
2363 #if !defined(CONFIG_USER_ONLY)
2364 switch (mem_idx) {
2365 case 0:
2366 DT0 = ldfq_user(addr);
2367 break;
2368 case 1:
2369 DT0 = ldfq_kernel(addr);
2370 break;
2371 #ifdef TARGET_SPARC64
2372 case 2:
2373 DT0 = ldfq_hypv(addr);
2374 break;
2375 #endif
2376 default:
2377 break;
2378 }
2379 #else
2380 address_mask(env, &addr);
2381 DT0 = ldfq_raw(addr);
2382 #endif
2383 }
2384
2385 void helper_ldqf(target_ulong addr, int mem_idx)
2386 {
2387 // XXX add 128 bit load
2388 CPU_QuadU u;
2389
2390 helper_check_align(addr, 7);
2391 #if !defined(CONFIG_USER_ONLY)
2392 switch (mem_idx) {
2393 case 0:
2394 u.ll.upper = ldq_user(addr);
2395 u.ll.lower = ldq_user(addr + 8);
2396 QT0 = u.q;
2397 break;
2398 case 1:
2399 u.ll.upper = ldq_kernel(addr);
2400 u.ll.lower = ldq_kernel(addr + 8);
2401 QT0 = u.q;
2402 break;
2403 #ifdef TARGET_SPARC64
2404 case 2:
2405 u.ll.upper = ldq_hypv(addr);
2406 u.ll.lower = ldq_hypv(addr + 8);
2407 QT0 = u.q;
2408 break;
2409 #endif
2410 default:
2411 break;
2412 }
2413 #else
2414 address_mask(env, &addr);
2415 u.ll.upper = ldq_raw(addr);
2416 u.ll.lower = ldq_raw((addr + 8) & 0xffffffffULL);
2417 QT0 = u.q;
2418 #endif
2419 }
2420
2421 void helper_stqf(target_ulong addr, int mem_idx)
2422 {
2423 // XXX add 128 bit store
2424 CPU_QuadU u;
2425
2426 helper_check_align(addr, 7);
2427 #if !defined(CONFIG_USER_ONLY)
2428 switch (mem_idx) {
2429 case 0:
2430 u.q = QT0;
2431 stq_user(addr, u.ll.upper);
2432 stq_user(addr + 8, u.ll.lower);
2433 break;
2434 case 1:
2435 u.q = QT0;
2436 stq_kernel(addr, u.ll.upper);
2437 stq_kernel(addr + 8, u.ll.lower);
2438 break;
2439 #ifdef TARGET_SPARC64
2440 case 2:
2441 u.q = QT0;
2442 stq_hypv(addr, u.ll.upper);
2443 stq_hypv(addr + 8, u.ll.lower);
2444 break;
2445 #endif
2446 default:
2447 break;
2448 }
2449 #else
2450 u.q = QT0;
2451 address_mask(env, &addr);
2452 stq_raw(addr, u.ll.upper);
2453 stq_raw((addr + 8) & 0xffffffffULL, u.ll.lower);
2454 #endif
2455 }
2456
2457 void helper_ldfsr(void)
2458 {
2459 int rnd_mode;
2460
2461 PUT_FSR32(env, *((uint32_t *) &FT0));
2462 switch (env->fsr & FSR_RD_MASK) {
2463 case FSR_RD_NEAREST:
2464 rnd_mode = float_round_nearest_even;
2465 break;
2466 default:
2467 case FSR_RD_ZERO:
2468 rnd_mode = float_round_to_zero;
2469 break;
2470 case FSR_RD_POS:
2471 rnd_mode = float_round_up;
2472 break;
2473 case FSR_RD_NEG:
2474 rnd_mode = float_round_down;
2475 break;
2476 }
2477 set_float_rounding_mode(rnd_mode, &env->fp_status);
2478 }
2479
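/* STFSR: copy the 32-bit FSR into FT0 so it can be stored. */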
2480 void helper_stfsr(void)
2481 {
2482 *((uint32_t *) &FT0) = GET_FSR32(env);
2483 }
2484
2485 void helper_debug(void)
2486 {
2487 env->exception_index = EXCP_DEBUG;
2488 cpu_loop_exit();
2489 }
2490
2491 #ifndef TARGET_SPARC64
2492 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2493 handling ? */
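/* V8 SAVE: move to window CWP - 1, trapping with window_overflow if that
   window is marked invalid in WIM. */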
2494 void helper_save(void)
2495 {
2496 uint32_t cwp;
2497
2498 cwp = cpu_cwp_dec(env, env->cwp - 1);
2499 if (env->wim & (1 << cwp)) {
2500 raise_exception(TT_WIN_OVF);
2501 }
2502 set_cwp(cwp);
2503 }
2504
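/* V8 RESTORE: move back to window CWP + 1, trapping with window_underflow
   if that window is marked invalid in WIM. */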
2505 void helper_restore(void)
2506 {
2507 uint32_t cwp;
2508
2509 cwp = cpu_cwp_inc(env, env->cwp + 1);
2510 if (env->wim & (1 << cwp)) {
2511 raise_exception(TT_WIN_UNF);
2512 }
2513 set_cwp(cwp);
2514 }
2515
2516 void helper_wrpsr(target_ulong new_psr)
2517 {
2518 if ((new_psr & PSR_CWP) >= env->nwindows)
2519 raise_exception(TT_ILL_INSN);
2520 else
2521 PUT_PSR(env, new_psr);
2522 }
2523
2524 target_ulong helper_rdpsr(void)
2525 {
2526 return GET_PSR(env);
2527 }
2528
2529 #else
2530 /* XXX: use another pointer for %iN registers to avoid slow wrapping
2531 handling ? */
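/* V9 SAVE: raise a spill trap when CANSAVE is zero and a clean_window trap
   when no clean window is left, otherwise update the window counters and
   switch to CWP - 1. */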
2532 void helper_save(void)
2533 {
2534 uint32_t cwp;
2535
2536 cwp = cpu_cwp_dec(env, env->cwp - 1);
2537 if (env->cansave == 0) {
2538 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2539 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2540 ((env->wstate & 0x7) << 2)));
2541 } else {
2542 if (env->cleanwin - env->canrestore == 0) {
2543 // XXX Clean windows without trap
2544 raise_exception(TT_CLRWIN);
2545 } else {
2546 env->cansave--;
2547 env->canrestore++;
2548 set_cwp(cwp);
2549 }
2550 }
2551 }
2552
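/* V9 RESTORE: raise a fill trap when CANRESTORE is zero, otherwise update
   the window counters and switch to CWP + 1. */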
2553 void helper_restore(void)
2554 {
2555 uint32_t cwp;
2556
2557 cwp = cpu_cwp_inc(env, env->cwp + 1);
2558 if (env->canrestore == 0) {
2559 raise_exception(TT_FILL | (env->otherwin != 0 ?
2560 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2561 ((env->wstate & 0x7) << 2)));
2562 } else {
2563 env->cansave++;
2564 env->canrestore--;
2565 set_cwp(cwp);
2566 }
2567 }
2568
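/* FLUSHW: force a spill trap unless the current window is the only one in
   use (CANSAVE == NWINDOWS - 2). */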
2569 void helper_flushw(void)
2570 {
2571 if (env->cansave != env->nwindows - 2) {
2572 raise_exception(TT_SPILL | (env->otherwin != 0 ?
2573 (TT_WOTHER | ((env->wstate & 0x38) >> 1)):
2574 ((env->wstate & 0x7) << 2)));
2575 }
2576 }
2577
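/* SAVED and RESTORED: window bookkeeping executed by spill/fill trap
   handlers to adjust CANSAVE, CANRESTORE, OTHERWIN and CLEANWIN. */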
2578 void helper_saved(void)
2579 {
2580 env->cansave++;
2581 if (env->otherwin == 0)
2582 env->canrestore--;
2583 else
2584 env->otherwin--;
2585 }
2586
2587 void helper_restored(void)
2588 {
2589 env->canrestore++;
2590 if (env->cleanwin < env->nwindows - 1)
2591 env->cleanwin++;
2592 if (env->otherwin == 0)
2593 env->cansave--;
2594 else
2595 env->otherwin--;
2596 }
2597
2598 target_ulong helper_rdccr(void)
2599 {
2600 return GET_CCR(env);
2601 }
2602
2603 void helper_wrccr(target_ulong new_ccr)
2604 {
2605 PUT_CCR(env, new_ccr);
2606 }
2607
2608 // CWP handling is reversed in V9, but we still use the V8 register
2609 // order.
2610 target_ulong helper_rdcwp(void)
2611 {
2612 return GET_CWP64(env);
2613 }
2614
2615 void helper_wrcwp(target_ulong new_cwp)
2616 {
2617 PUT_CWP64(env, new_cwp);
2618 }
2619
2620 // This macro uses non-native bit order (bit 0 is the most significant bit)
2621 #define GET_FIELD(X, FROM, TO) \
2622 ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
2623
2624 // This macro uses the bit order of the manuals, i.e. bit 0 is 2^0 (the LSB)
2625 #define GET_FIELD_SP(X, FROM, TO) \
2626 GET_FIELD(X, 63 - (TO), 63 - (FROM))
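// e.g. GET_FIELD_SP(x, 0, 3) extracts bits 3..0 of x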
2627
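/* VIS ARRAY8 address generation: shuffle the bit fields of the packed
   (x, y, z) coordinates in pixel_addr into a blocked-array offset; see the
   UltraSPARC manuals for the exact layout. */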
2628 target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
2629 {
2630 return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
2631 (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
2632 (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
2633 (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
2634 (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
2635 (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
2636 (((pixel_addr >> 55) & 1) << 4) |
2637 (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
2638 GET_FIELD_SP(pixel_addr, 11, 12);
2639 }
2640
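/* VIS ALIGNADDR: store the low 3 bits of addr + offset in GSR.align and
   return the 8-byte aligned sum. */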
2641 target_ulong helper_alignaddr(target_ulong addr, target_ulong offset)
2642 {
2643 uint64_t tmp;
2644
2645 tmp = addr + offset;
2646 env->gsr &= ~7ULL;
2647 env->gsr |= tmp & 7ULL;
2648 return tmp & ~7ULL;
2649 }
2650
2651 target_ulong helper_popc(target_ulong val)
2652 {
2653 return ctpop64(val);
2654 }
2655
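/* Return the backing store for the global register set selected by the
   PSTATE AG/MG/IG bits (normal globals when none is set). */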
2656 static inline uint64_t *get_gregset(uint64_t pstate)
2657 {
2658 switch (pstate) {
2659 default:
2660 case 0:
2661 return env->bgregs;
2662 case PS_AG:
2663 return env->agregs;
2664 case PS_MG:
2665 return env->mgregs;
2666 case PS_IG:
2667 return env->igregs;
2668 }
2669 }
2670
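/* Switch the active global register bank when the AG/MG/IG bits of PSTATE
   change: save the current globals and load the newly selected set. */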
2671 void change_pstate(uint64_t new_pstate)
2672 {
2673 uint64_t pstate_regs, new_pstate_regs;
2674 uint64_t *src, *dst;
2675
2676 pstate_regs = env->pstate & 0xc01;
2677 new_pstate_regs = new_pstate & 0xc01;
2678 if (new_pstate_regs != pstate_regs) {
2679 // Switch global register bank
2680 src = get_gregset(new_pstate_regs);
2681 dst = get_gregset(pstate_regs);
2682 memcpy32(dst, env->gregs);
2683 memcpy32(env->gregs, src);
2684 }
2685 env->pstate = new_pstate;
2686 }
2687
2688 void helper_wrpstate(target_ulong new_state)
2689 {
2690 if (!(env->def->features & CPU_FEATURE_GL))
2691 change_pstate(new_state & 0xf3f);
2692 }
2693
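/* DONE: pop one trap level, restoring CCR, ASI, PSTATE and CWP from TSTATE
   and resuming at TNPC (the instruction after the trapping one). */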
2694 void helper_done(void)
2695 {
2696 env->pc = env->tsptr->tnpc; /* DONE resumes after the trapping insn */
2697 env->npc = env->tsptr->tnpc + 4;
2698 PUT_CCR(env, env->tsptr->tstate >> 32);
2699 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2700 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2701 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2702 env->tl--;
2703 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2704 }
2705
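/* RETRY: like DONE, but resume at TPC so the trapping instruction is
   re-executed. */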
2706 void helper_retry(void)
2707 {
2708 env->pc = env->tsptr->tpc;
2709 env->npc = env->tsptr->tnpc;
2710 PUT_CCR(env, env->tsptr->tstate >> 32);
2711 env->asi = (env->tsptr->tstate >> 24) & 0xff;
2712 change_pstate((env->tsptr->tstate >> 8) & 0xf3f);
2713 PUT_CWP64(env, env->tsptr->tstate & 0xff);
2714 env->tl--;
2715 env->tsptr = &env->ts[env->tl & MAXTL_MASK];
2716 }
2717 #endif
2718
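/* Switch the current register window.  The in registers of window
   nwindows - 1 are the out registers of window 0; a copy of them is kept
   past the end of the window array while that window is current. */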
2719 void cpu_set_cwp(CPUState *env1, int new_cwp)
2720 {
2721 /* put the modified wrap registers at their proper location */
2722 if (env1->cwp == env1->nwindows - 1)
2723 memcpy32(env1->regbase, env1->regbase + env1->nwindows * 16);
2724 env1->cwp = new_cwp;
2725 /* put the wrap registers at their temporary location */
2726 if (new_cwp == env1->nwindows - 1)
2727 memcpy32(env1->regbase + env1->nwindows * 16, env1->regbase);
2728 env1->regwptr = env1->regbase + (new_cwp * 16);
2729 }
2730
2731 void set_cwp(int new_cwp)
2732 {
2733 cpu_set_cwp(env, new_cwp);
2734 }
2735
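/* FLUSH: invalidate any translated code covering the flushed doubleword. */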
2736 void helper_flush(target_ulong addr)
2737 {
2738 addr &= ~7;
2739 tb_invalidate_page_range(addr, addr + 8);
2740 }
2741
2742 #if !defined(CONFIG_USER_ONLY)
2743
2744 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2745 void *retaddr);
2746
2747 #define MMUSUFFIX _mmu
2748 #define ALIGNED_ONLY
2749
2750 #define SHIFT 0
2751 #include "softmmu_template.h"
2752
2753 #define SHIFT 1
2754 #include "softmmu_template.h"
2755
2756 #define SHIFT 2
2757 #include "softmmu_template.h"
2758
2759 #define SHIFT 3
2760 #include "softmmu_template.h"
2761
2762 /* XXX: make it generic ? */
2763 static void cpu_restore_state2(void *retaddr)
2764 {
2765 TranslationBlock *tb;
2766 unsigned long pc;
2767
2768 if (retaddr) {
2769 /* now we have a real cpu fault */
2770 pc = (unsigned long)retaddr;
2771 tb = tb_find_pc(pc);
2772 if (tb) {
2773 /* the PC is inside the translated code. It means that we have
2774 a virtual CPU fault */
2775 cpu_restore_state(tb, env, pc, (void *)(long)env->cond);
2776 }
2777 }
2778 }
2779
2780 static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
2781 void *retaddr)
2782 {
2783 #ifdef DEBUG_UNALIGNED
2784 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
2785 "\n", addr, env->pc);
2786 #endif
2787 cpu_restore_state2(retaddr);
2788 raise_exception(TT_UNALIGNED);
2789 }
2790
2791 /* try to fill the TLB and return an exception if error. If retaddr is
2792 NULL, it means that the function was called in C code (i.e. not
2793 from generated code or from helper.c) */
2794 /* XXX: fix it to restore all registers */
2795 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2796 {
2797 int ret;
2798 CPUState *saved_env;
2799
2800 /* XXX: hack to restore env in all cases, even if not called from
2801 generated code */
2802 saved_env = env;
2803 env = cpu_single_env;
2804
2805 ret = cpu_sparc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2806 if (ret) {
2807 cpu_restore_state2(retaddr);
2808 cpu_loop_exit();
2809 }
2810 env = saved_env;
2811 }
2812
2813 #endif
2814
2815 #ifndef TARGET_SPARC64
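/* Unassigned access on sparc32: record the fault in the MMU fault status
   and fault address registers, then raise a code/data access exception if
   the MMU is enabled and not in no-fault mode. */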
2816 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2817 int is_asi)
2818 {
2819 CPUState *saved_env;
2820
2821 /* XXX: hack to restore env in all cases, even if not called from
2822 generated code */
2823 saved_env = env;
2824 env = cpu_single_env;
2825 #ifdef DEBUG_UNASSIGNED
2826 if (is_asi)
2827 printf("Unassigned mem %s access to " TARGET_FMT_plx
2828 " asi 0x%02x from " TARGET_FMT_lx "\n",
2829 is_exec ? "exec" : is_write ? "write" : "read", addr, is_asi,
2830 env->pc);
2831 else
2832 printf("Unassigned mem %s access to " TARGET_FMT_plx " from "
2833 TARGET_FMT_lx "\n",
2834 is_exec ? "exec" : is_write ? "write" : "read", addr, env->pc);
2835 #endif
2836 if (env->mmuregs[3]) /* Fault status register */
2837 env->mmuregs[3] = 1; /* overflow (not read before another fault) */
2838 if (is_asi)
2839 env->mmuregs[3] |= 1 << 16;
2840 if (env->psrs)
2841 env->mmuregs[3] |= 1 << 5;
2842 if (is_exec)
2843 env->mmuregs[3] |= 1 << 6;
2844 if (is_write)
2845 env->mmuregs[3] |= 1 << 7;
2846 env->mmuregs[3] |= (5 << 2) | 2;
2847 env->mmuregs[4] = addr; /* Fault address register */
2848 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
2849 if (is_exec)
2850 raise_exception(TT_CODE_ACCESS);
2851 else
2852 raise_exception(TT_DATA_ACCESS);
2853 }
2854 env = saved_env;
2855 }
2856 #else
2857 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
2858 int is_asi)
2859 {
2860 #ifdef DEBUG_UNASSIGNED
2861 CPUState *saved_env;
2862
2863 /* XXX: hack to restore env in all cases, even if not called from
2864 generated code */
2865 saved_env = env;
2866 env = cpu_single_env;
2867 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
2868 "\n", addr, env->pc);
2869 env = saved_env;
2870 #endif
2871 if (is_exec)
2872 raise_exception(TT_CODE_ACCESS);
2873 else
2874 raise_exception(TT_DATA_ACCESS);
2875 }
2876 #endif
2877