1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21
22 #include "op_helper.h"
23
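/* op_helper.h and op_helper_mem.h are included once per MEMSUFFIX to
 * instantiate the memory access helpers: the _raw variant is always built,
 * the _user and _kernel variants only for full system emulation.
 */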
24 #define MEMSUFFIX _raw
25 #include "op_helper.h"
26 #include "op_helper_mem.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #define MEMSUFFIX _user
29 #include "op_helper.h"
30 #include "op_helper_mem.h"
31 #define MEMSUFFIX _kernel
32 #include "op_helper.h"
33 #include "op_helper_mem.h"
34 #endif
35
36 //#define DEBUG_OP
37 //#define DEBUG_EXCEPTIONS
38 //#define DEBUG_SOFTWARE_TLB
39 //#define FLUSH_ALL_TLBS
40
41 /*****************************************************************************/
42 /* Exceptions processing helpers */
43
44 void do_raise_exception_err (uint32_t exception, int error_code)
45 {
46 #if 0
47 printf("Raise exception %3x code : %d\n", exception, error_code);
48 #endif
49 switch (exception) {
50 case POWERPC_EXCP_PROGRAM:
51 if (error_code == POWERPC_EXCP_FP && msr_fe0 == 0 && msr_fe1 == 0)
52 return;
53 break;
54 default:
55 break;
56 }
57 env->exception_index = exception;
58 env->error_code = error_code;
59 cpu_loop_exit();
60 }
61
62 void do_raise_exception (uint32_t exception)
63 {
64 do_raise_exception_err(exception, 0);
65 }
66
67 void cpu_dump_EA (target_ulong EA);
68 void do_print_mem_EA (target_ulong EA)
69 {
70 cpu_dump_EA(EA);
71 }
72
73 /*****************************************************************************/
74 /* Registers load and stores */
75 void do_load_cr (void)
76 {
77 T0 = (env->crf[0] << 28) |
78 (env->crf[1] << 24) |
79 (env->crf[2] << 20) |
80 (env->crf[3] << 16) |
81 (env->crf[4] << 12) |
82 (env->crf[5] << 8) |
83 (env->crf[6] << 4) |
84 (env->crf[7] << 0);
85 }
86
87 void do_store_cr (uint32_t mask)
88 {
89 int i, sh;
90
91 for (i = 0, sh = 7; i < 8; i++, sh--) {
92 if (mask & (1 << sh))
93 env->crf[i] = (T0 >> (sh * 4)) & 0xFUL;
94 }
95 }
96
97 void do_load_xer (void)
98 {
99 T0 = (xer_so << XER_SO) |
100 (xer_ov << XER_OV) |
101 (xer_ca << XER_CA) |
102 (xer_bc << XER_BC) |
103 (xer_cmp << XER_CMP);
104 }
105
106 void do_store_xer (void)
107 {
108 xer_so = (T0 >> XER_SO) & 0x01;
109 xer_ov = (T0 >> XER_OV) & 0x01;
110 xer_ca = (T0 >> XER_CA) & 0x01;
111 xer_cmp = (T0 >> XER_CMP) & 0xFF;
112 xer_bc = (T0 >> XER_BC) & 0x7F;
113 }
114
115 void do_load_fpscr (void)
116 {
117 /* The 32 MSB of the target fpr are undefined.
118 * They'll be zero...
119 */
120 union {
121 float64 d;
122 struct {
123 uint32_t u[2];
124 } s;
125 } u;
126 int i;
127
128 #if defined(WORDS_BIGENDIAN)
129 #define WORD0 0
130 #define WORD1 1
131 #else
132 #define WORD0 1
133 #define WORD1 0
134 #endif
135 u.s.u[WORD0] = 0;
136 u.s.u[WORD1] = 0;
137 for (i = 0; i < 8; i++)
138 u.s.u[WORD1] |= env->fpscr[i] << (4 * i);
139 FT0 = u.d;
140 }
141
142 void do_store_fpscr (uint32_t mask)
143 {
144 /*
145 * We use only the 32 LSB of the incoming fpr
146 */
147 union {
148 double d;
149 struct {
150 uint32_t u[2];
151 } s;
152 } u;
153 int i, rnd_type;
154
155 u.d = FT0;
156 if (mask & 0x80)
157 env->fpscr[0] = (env->fpscr[0] & 0x9) | ((u.s.u[WORD1] >> 28) & ~0x9);
158 for (i = 1; i < 7; i++) {
159 if (mask & (1 << (7 - i)))
160 env->fpscr[i] = (u.s.u[WORD1] >> (4 * (7 - i))) & 0xF;
161 }
162 /* TODO: update FEX & VX */
163 /* Set rounding mode */
164 switch (env->fpscr[0] & 0x3) {
165 case 0:
166 /* Best approximation (round to nearest) */
167 rnd_type = float_round_nearest_even;
168 break;
169 case 1:
170 /* Smaller magnitude (round toward zero) */
171 rnd_type = float_round_to_zero;
172 break;
173 case 2:
174         /* Round toward +infinity */
175 rnd_type = float_round_up;
176 break;
177 default:
178 case 3:
179         /* Round toward -infinity */
180 rnd_type = float_round_down;
181 break;
182 }
183 set_float_rounding_mode(rnd_type, &env->fp_status);
184 }
185
186 target_ulong ppc_load_dump_spr (int sprn)
187 {
188 if (loglevel != 0) {
189 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
190 sprn, sprn, env->spr[sprn]);
191 }
192
193 return env->spr[sprn];
194 }
195
196 void ppc_store_dump_spr (int sprn, target_ulong val)
197 {
198 if (loglevel != 0) {
199 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
200 sprn, sprn, env->spr[sprn], val);
201 }
202 env->spr[sprn] = val;
203 }
204
205 /*****************************************************************************/
206 /* Fixed point operations helpers */
207 #if defined(TARGET_PPC64)
208 static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
209 {
210 *plow += a;
211 /* carry test */
212 if (*plow < a)
213 (*phigh)++;
214 *phigh += b;
215 }
216
217 static void neg128 (uint64_t *plow, uint64_t *phigh)
218 {
219 *plow = ~*plow;
220 *phigh = ~*phigh;
221 add128(plow, phigh, 1, 0);
222 }
223
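/* 64x64 -> 128-bit multiply, schoolbook style: split each operand into
 * 32-bit halves and accumulate the four partial products with add128().
 */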
224 static void mul64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
225 {
226 uint32_t a0, a1, b0, b1;
227 uint64_t v;
228
229 a0 = a;
230 a1 = a >> 32;
231
232 b0 = b;
233 b1 = b >> 32;
234
235 v = (uint64_t)a0 * (uint64_t)b0;
236 *plow = v;
237 *phigh = 0;
238
239 v = (uint64_t)a0 * (uint64_t)b1;
240 add128(plow, phigh, v << 32, v >> 32);
241
242 v = (uint64_t)a1 * (uint64_t)b0;
243 add128(plow, phigh, v << 32, v >> 32);
244
245 v = (uint64_t)a1 * (uint64_t)b1;
246 *phigh += v;
247 #if defined(DEBUG_MULDIV)
248 printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
249 a, b, *phigh, *plow);
250 #endif
251 }
252
253 void do_mul64 (uint64_t *plow, uint64_t *phigh)
254 {
255 mul64(plow, phigh, T0, T1);
256 }
257
258 static void imul64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
259 {
260 int sa, sb;
261
262 sa = (a < 0);
263 if (sa)
264 a = -a;
265 sb = (b < 0);
266 if (sb)
267 b = -b;
268 mul64(plow, phigh, a, b);
269 if (sa ^ sb) {
270 neg128(plow, phigh);
271 }
272 }
273
274 void do_imul64 (uint64_t *plow, uint64_t *phigh)
275 {
276 imul64(plow, phigh, T0, T1);
277 }
278 #endif
279
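/* Carry out of a 32-bit add-with-carry: the sum wrapped around iff it is
 * below the original operand, or equal to it while a carry-in of 1 was added.
 */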
280 void do_adde (void)
281 {
282 T2 = T0;
283 T0 += T1 + xer_ca;
284 if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
285 (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
286 xer_ca = 0;
287 } else {
288 xer_ca = 1;
289 }
290 }
291
292 #if defined(TARGET_PPC64)
293 void do_adde_64 (void)
294 {
295 T2 = T0;
296 T0 += T1 + xer_ca;
297 if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
298 (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
299 xer_ca = 0;
300 } else {
301 xer_ca = 1;
302 }
303 }
304 #endif
305
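/* addme adds (-1 + CA) to the operand: signed overflow is only possible when
 * the operand is negative and the result is not. The carry out is 1 for any
 * non-zero operand; for a zero operand it equals the incoming carry, which is
 * why xer_ca is simply left unchanged in that case.
 */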
306 void do_addmeo (void)
307 {
308 T1 = T0;
309 T0 += xer_ca + (-1);
310 if (likely(!((uint32_t)T1 &
311 ((uint32_t)T1 ^ (uint32_t)T0) & (1UL << 31)))) {
312 xer_ov = 0;
313 } else {
314 xer_ov = 1;
315 xer_so = 1;
316 }
317 if (likely(T1 != 0))
318 xer_ca = 1;
319 }
320
321 #if defined(TARGET_PPC64)
322 void do_addmeo_64 (void)
323 {
324 T1 = T0;
325 T0 += xer_ca + (-1);
326 if (likely(!((uint64_t)T1 &
327 ((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) {
328 xer_ov = 0;
329 } else {
330 xer_ov = 1;
331 xer_so = 1;
332 }
333 if (likely(T1 != 0))
334 xer_ca = 1;
335 }
336 #endif
337
338 void do_divwo (void)
339 {
340 if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
341 (int32_t)T1 == 0))) {
342 xer_ov = 0;
343 T0 = (int32_t)T0 / (int32_t)T1;
344 } else {
345 xer_ov = 1;
346 xer_so = 1;
347 T0 = (-1) * ((uint32_t)T0 >> 31);
348 }
349 }
350
351 #if defined(TARGET_PPC64)
352 void do_divdo (void)
353 {
354 if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1ULL) ||
355 (int64_t)T1 == 0))) {
356 xer_ov = 0;
357 T0 = (int64_t)T0 / (int64_t)T1;
358 } else {
359 xer_ov = 1;
360 xer_so = 1;
361 T0 = (-1ULL) * ((uint64_t)T0 >> 63);
362 }
363 }
364 #endif
365
366 void do_divwuo (void)
367 {
368 if (likely((uint32_t)T1 != 0)) {
369 xer_ov = 0;
370 T0 = (uint32_t)T0 / (uint32_t)T1;
371 } else {
372 xer_ov = 1;
373 xer_so = 1;
374 T0 = 0;
375 }
376 }
377
378 #if defined(TARGET_PPC64)
379 void do_divduo (void)
380 {
381 if (likely((uint64_t)T1 != 0)) {
382 xer_ov = 0;
383 T0 = (uint64_t)T0 / (uint64_t)T1;
384 } else {
385 xer_ov = 1;
386 xer_so = 1;
387 T0 = 0;
388 }
389 }
390 #endif
391
392 void do_mullwo (void)
393 {
394 int64_t res = (int64_t)T0 * (int64_t)T1;
395
396 if (likely((int32_t)res == res)) {
397 xer_ov = 0;
398 } else {
399 xer_ov = 1;
400 xer_so = 1;
401 }
402 T0 = (int32_t)res;
403 }
404
405 #if defined(TARGET_PPC64)
406 void do_mulldo (void)
407 {
408 int64_t th;
409 uint64_t tl;
410
411 do_imul64(&tl, &th);
412 if (likely(th == 0)) {
413 xer_ov = 0;
414 } else {
415 xer_ov = 1;
416 xer_so = 1;
417 }
418 T0 = (int64_t)tl;
419 }
420 #endif
421
422 void do_nego (void)
423 {
424 if (likely((int32_t)T0 != INT32_MIN)) {
425 xer_ov = 0;
426 T0 = -(int32_t)T0;
427 } else {
428 xer_ov = 1;
429 xer_so = 1;
430 }
431 }
432
433 #if defined(TARGET_PPC64)
434 void do_nego_64 (void)
435 {
436 if (likely((int64_t)T0 != INT64_MIN)) {
437 xer_ov = 0;
438 T0 = -(int64_t)T0;
439 } else {
440 xer_ov = 1;
441 xer_so = 1;
442 }
443 }
444 #endif
445
446 void do_subfe (void)
447 {
448 T0 = T1 + ~T0 + xer_ca;
449 if (likely((uint32_t)T0 >= (uint32_t)T1 &&
450 (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
451 xer_ca = 0;
452 } else {
453 xer_ca = 1;
454 }
455 }
456
457 #if defined(TARGET_PPC64)
458 void do_subfe_64 (void)
459 {
460 T0 = T1 + ~T0 + xer_ca;
461 if (likely((uint64_t)T0 >= (uint64_t)T1 &&
462 (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
463 xer_ca = 0;
464 } else {
465 xer_ca = 1;
466 }
467 }
468 #endif
469
470 void do_subfmeo (void)
471 {
472 T1 = T0;
473 T0 = ~T0 + xer_ca - 1;
474 if (likely(!((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0) &
475 (1UL << 31)))) {
476 xer_ov = 0;
477 } else {
478 xer_ov = 1;
479 xer_so = 1;
480 }
481 if (likely((uint32_t)T1 != UINT32_MAX))
482 xer_ca = 1;
483 }
484
485 #if defined(TARGET_PPC64)
486 void do_subfmeo_64 (void)
487 {
488 T1 = T0;
489 T0 = ~T0 + xer_ca - 1;
490 if (likely(!((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0) &
491 (1ULL << 63)))) {
492 xer_ov = 0;
493 } else {
494 xer_ov = 1;
495 xer_so = 1;
496 }
497 if (likely((uint64_t)T1 != UINT64_MAX))
498 xer_ca = 1;
499 }
500 #endif
501
502 void do_subfzeo (void)
503 {
504 T1 = T0;
505 T0 = ~T0 + xer_ca;
506 if (likely(!(((uint32_t)~T1 ^ UINT32_MAX) &
507 ((uint32_t)(~T1) ^ (uint32_t)T0) & (1UL << 31)))) {
508 xer_ov = 0;
509 } else {
510 xer_ov = 1;
511 xer_so = 1;
512 }
513 if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
514 xer_ca = 0;
515 } else {
516 xer_ca = 1;
517 }
518 }
519
520 #if defined(TARGET_PPC64)
521 void do_subfzeo_64 (void)
522 {
523 T1 = T0;
524 T0 = ~T0 + xer_ca;
525 if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
526 ((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
527 xer_ov = 0;
528 } else {
529 xer_ov = 1;
530 xer_so = 1;
531 }
532 if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
533 xer_ca = 0;
534 } else {
535 xer_ca = 1;
536 }
537 }
538 #endif
539
540 /* shift right arithmetic helper */
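/* XER.CA is set only when the result is negative and one or more 1 bits were
 * shifted out; otherwise it is cleared.
 */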
541 void do_sraw (void)
542 {
543 int32_t ret;
544
545 if (likely(!(T1 & 0x20UL))) {
546 if (likely((uint32_t)T1 != 0)) {
547 ret = (int32_t)T0 >> (T1 & 0x1fUL);
548 if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
549 xer_ca = 0;
550 } else {
551 xer_ca = 1;
552 }
553 } else {
554 ret = T0;
555 xer_ca = 0;
556 }
557 } else {
558 ret = (-1) * ((uint32_t)T0 >> 31);
559 if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) {
560 xer_ca = 0;
561 } else {
562 xer_ca = 1;
563 }
564 }
565 T0 = ret;
566 }
567
568 #if defined(TARGET_PPC64)
569 void do_srad (void)
570 {
571 int64_t ret;
572
573 if (likely(!(T1 & 0x40UL))) {
574 if (likely((uint64_t)T1 != 0)) {
575 ret = (int64_t)T0 >> (T1 & 0x3FUL);
576 if (likely(ret >= 0 || ((int64_t)T0 & ((1 << T1) - 1)) == 0)) {
577 xer_ca = 0;
578 } else {
579 xer_ca = 1;
580 }
581 } else {
582 ret = T0;
583 xer_ca = 0;
584 }
585 } else {
586 ret = (-1) * ((uint64_t)T0 >> 63);
587 if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
588 xer_ca = 0;
589 } else {
590 xer_ca = 1;
591 }
592 }
593 T0 = ret;
594 }
595 #endif
596
597 static inline int popcnt (uint32_t val)
598 {
599     int i;
600     /* Kernighan's method: clear the lowest set bit until none remain */
601     for (i = 0; val != 0; i++)
602         val = val & (val - 1);
603 
604     return i;
605 }
606
607 void do_popcntb (void)
608 {
609 uint32_t ret;
610 int i;
611
612 ret = 0;
613 for (i = 0; i < 32; i += 8)
614 ret |= popcnt((T0 >> i) & 0xFF) << i;
615 T0 = ret;
616 }
617
618 #if defined(TARGET_PPC64)
619 void do_popcntb_64 (void)
620 {
621 uint64_t ret;
622 int i;
623
624 ret = 0;
625 for (i = 0; i < 64; i += 8)
626         ret |= (uint64_t)popcnt((T0 >> i) & 0xFF) << i;
627 T0 = ret;
628 }
629 #endif
630
631 /*****************************************************************************/
632 /* Floating point operations helpers */
633 void do_fctiw (void)
634 {
635 union {
636 double d;
637 uint64_t i;
638 } p;
639
640 p.i = float64_to_int32(FT0, &env->fp_status);
641 #if USE_PRECISE_EMULATION
642 /* XXX: higher bits are not supposed to be significant.
643 * to make tests easier, return the same as a real PowerPC 750 (aka G3)
644 */
645 p.i |= 0xFFF80000ULL << 32;
646 #endif
647 FT0 = p.d;
648 }
649
650 void do_fctiwz (void)
651 {
652 union {
653 double d;
654 uint64_t i;
655 } p;
656
657 p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status);
658 #if USE_PRECISE_EMULATION
659 /* XXX: higher bits are not supposed to be significant.
660 * to make tests easier, return the same as a real PowerPC 750 (aka G3)
661 */
662 p.i |= 0xFFF80000ULL << 32;
663 #endif
664 FT0 = p.d;
665 }
666
667 #if defined(TARGET_PPC64)
668 void do_fcfid (void)
669 {
670 union {
671 double d;
672 uint64_t i;
673 } p;
674
675 p.d = FT0;
676 FT0 = int64_to_float64(p.i, &env->fp_status);
677 }
678
679 void do_fctid (void)
680 {
681 union {
682 double d;
683 uint64_t i;
684 } p;
685
686 p.i = float64_to_int64(FT0, &env->fp_status);
687 FT0 = p.d;
688 }
689
690 void do_fctidz (void)
691 {
692 union {
693 double d;
694 uint64_t i;
695 } p;
696
697 p.i = float64_to_int64_round_to_zero(FT0, &env->fp_status);
698 FT0 = p.d;
699 }
700
701 #endif
702
703 #if USE_PRECISE_EMULATION
704 void do_fmadd (void)
705 {
706 #ifdef FLOAT128
707 float128 ft0_128, ft1_128;
708
709 ft0_128 = float64_to_float128(FT0, &env->fp_status);
710 ft1_128 = float64_to_float128(FT1, &env->fp_status);
711 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
712 ft1_128 = float64_to_float128(FT2, &env->fp_status);
713 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
714 FT0 = float128_to_float64(ft0_128, &env->fp_status);
715 #else
716 /* This is OK on x86 hosts */
717 FT0 = (FT0 * FT1) + FT2;
718 #endif
719 }
720
721 void do_fmsub (void)
722 {
723 #ifdef FLOAT128
724 float128 ft0_128, ft1_128;
725
726 ft0_128 = float64_to_float128(FT0, &env->fp_status);
727 ft1_128 = float64_to_float128(FT1, &env->fp_status);
728 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
729 ft1_128 = float64_to_float128(FT2, &env->fp_status);
730 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
731 FT0 = float128_to_float64(ft0_128, &env->fp_status);
732 #else
733 /* This is OK on x86 hosts */
734 FT0 = (FT0 * FT1) - FT2;
735 #endif
736 }
737 #endif /* USE_PRECISE_EMULATION */
738
739 void do_fnmadd (void)
740 {
741 #if USE_PRECISE_EMULATION
742 #ifdef FLOAT128
743 float128 ft0_128, ft1_128;
744
745 ft0_128 = float64_to_float128(FT0, &env->fp_status);
746 ft1_128 = float64_to_float128(FT1, &env->fp_status);
747 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
748 ft1_128 = float64_to_float128(FT2, &env->fp_status);
749 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
750 FT0 = float128_to_float64(ft0_128, &env->fp_status);
751 #else
752 /* This is OK on x86 hosts */
753 FT0 = (FT0 * FT1) + FT2;
754 #endif
755 #else
756 FT0 = float64_mul(FT0, FT1, &env->fp_status);
757 FT0 = float64_add(FT0, FT2, &env->fp_status);
758 #endif
759 if (likely(!isnan(FT0)))
760 FT0 = float64_chs(FT0);
761 }
762
763 void do_fnmsub (void)
764 {
765 #if USE_PRECISE_EMULATION
766 #ifdef FLOAT128
767 float128 ft0_128, ft1_128;
768
769 ft0_128 = float64_to_float128(FT0, &env->fp_status);
770 ft1_128 = float64_to_float128(FT1, &env->fp_status);
771 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
772 ft1_128 = float64_to_float128(FT2, &env->fp_status);
773 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
774 FT0 = float128_to_float64(ft0_128, &env->fp_status);
775 #else
776 /* This is OK on x86 hosts */
777 FT0 = (FT0 * FT1) - FT2;
778 #endif
779 #else
780 FT0 = float64_mul(FT0, FT1, &env->fp_status);
781 FT0 = float64_sub(FT0, FT2, &env->fp_status);
782 #endif
783 if (likely(!isnan(FT0)))
784 FT0 = float64_chs(FT0);
785 }
786
787 void do_fsqrt (void)
788 {
789 FT0 = float64_sqrt(FT0, &env->fp_status);
790 }
791
792 void do_fres (void)
793 {
794 union {
795 double d;
796 uint64_t i;
797 } p;
798
799 if (likely(isnormal(FT0))) {
800 #if USE_PRECISE_EMULATION
801 FT0 = float64_div(1.0, FT0, &env->fp_status);
802 FT0 = float64_to_float32(FT0, &env->fp_status);
803 #else
804 FT0 = float32_div(1.0, FT0, &env->fp_status);
805 #endif
806 } else {
807 p.d = FT0;
808 if (p.i == 0x8000000000000000ULL) {
809 p.i = 0xFFF0000000000000ULL;
810 } else if (p.i == 0x0000000000000000ULL) {
811 p.i = 0x7FF0000000000000ULL;
812 } else if (isnan(FT0)) {
813 p.i = 0x7FF8000000000000ULL;
814 } else if (FT0 < 0.0) {
815 p.i = 0x8000000000000000ULL;
816 } else {
817 p.i = 0x0000000000000000ULL;
818 }
819 FT0 = p.d;
820 }
821 }
822
823 void do_frsqrte (void)
824 {
825 union {
826 double d;
827 uint64_t i;
828 } p;
829
830 if (likely(isnormal(FT0) && FT0 > 0.0)) {
831 FT0 = float64_sqrt(FT0, &env->fp_status);
832 FT0 = float32_div(1.0, FT0, &env->fp_status);
833 } else {
834 p.d = FT0;
835 if (p.i == 0x8000000000000000ULL) {
836 p.i = 0xFFF0000000000000ULL;
837 } else if (p.i == 0x0000000000000000ULL) {
838 p.i = 0x7FF0000000000000ULL;
839 } else if (isnan(FT0)) {
840 if (!(p.i & 0x0008000000000000ULL))
841 p.i |= 0x000FFFFFFFFFFFFFULL;
842 } else if (FT0 < 0) {
843 p.i = 0x7FF8000000000000ULL;
844 } else {
845 p.i = 0x0000000000000000ULL;
846 }
847 FT0 = p.d;
848 }
849 }
850
851 void do_fsel (void)
852 {
853 if (FT0 >= 0)
854 FT0 = FT1;
855 else
856 FT0 = FT2;
857 }
858
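/* fcmpu/fcmpo result encoding (FPCC/CR field): 0x8 = less than, 0x4 = greater
 * than, 0x2 = equal, 0x1 = unordered (at least one operand is a NaN).
 */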
859 void do_fcmpu (void)
860 {
861 if (likely(!isnan(FT0) && !isnan(FT1))) {
862 if (float64_lt(FT0, FT1, &env->fp_status)) {
863 T0 = 0x08UL;
864 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
865 T0 = 0x04UL;
866 } else {
867 T0 = 0x02UL;
868 }
869 } else {
870 T0 = 0x01UL;
871 env->fpscr[4] |= 0x1;
872 env->fpscr[6] |= 0x1;
873 }
874 env->fpscr[3] = T0;
875 }
876
877 void do_fcmpo (void)
878 {
879 env->fpscr[4] &= ~0x1;
880 if (likely(!isnan(FT0) && !isnan(FT1))) {
881 if (float64_lt(FT0, FT1, &env->fp_status)) {
882 T0 = 0x08UL;
883 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
884 T0 = 0x04UL;
885 } else {
886 T0 = 0x02UL;
887 }
888 } else {
889 T0 = 0x01UL;
890 env->fpscr[4] |= 0x1;
891 if (!float64_is_signaling_nan(FT0) || !float64_is_signaling_nan(FT1)) {
892 /* Quiet NaN case */
893 env->fpscr[6] |= 0x1;
894 if (!(env->fpscr[1] & 0x8))
895 env->fpscr[4] |= 0x8;
896 } else {
897 env->fpscr[4] |= 0x8;
898 }
899 }
900 env->fpscr[3] = T0;
901 }
902
903 #if !defined (CONFIG_USER_ONLY)
904 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
905 void do_rfi (void)
906 {
907 #if defined(TARGET_PPC64)
908 if (env->spr[SPR_SRR1] & (1ULL << MSR_SF)) {
909 env->nip = (uint64_t)(env->spr[SPR_SRR0] & ~0x00000003);
910 do_store_msr(env, (uint64_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
911 } else {
912 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
913 ppc_store_msr_32(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
914 }
915 #else
916 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
917 do_store_msr(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
918 #endif
919 #if defined (DEBUG_OP)
920 cpu_dump_rfi(env->nip, do_load_msr(env));
921 #endif
922 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
923 }
924
925 #if defined(TARGET_PPC64)
926 void do_rfid (void)
927 {
928 if (env->spr[SPR_SRR1] & (1ULL << MSR_SF)) {
929 env->nip = (uint64_t)(env->spr[SPR_SRR0] & ~0x00000003);
930 do_store_msr(env, (uint64_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
931 } else {
932 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
933 do_store_msr(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
934 }
935 #if defined (DEBUG_OP)
936 cpu_dump_rfi(env->nip, do_load_msr(env));
937 #endif
938 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
939 }
940 #endif
941 #endif
942
943 void do_tw (int flags)
944 {
945 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
946 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
947 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
948 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
949 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
950 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
951 }
952 }
953
954 #if defined(TARGET_PPC64)
955 void do_td (int flags)
956 {
957 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
958 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
959 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
960 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
961 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
962 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
963 }
964 #endif
965
966 /*****************************************************************************/
967 /* PowerPC 601 specific instructions (POWER bridge) */
968 void do_POWER_abso (void)
969 {
970 if ((uint32_t)T0 == INT32_MIN) {
971 T0 = INT32_MAX;
972 xer_ov = 1;
973 xer_so = 1;
974 } else {
975 T0 = -T0;
976 xer_ov = 0;
977 }
978 }
979
980 void do_POWER_clcs (void)
981 {
982 switch (T0) {
983 case 0x0CUL:
984 /* Instruction cache line size */
985 T0 = ICACHE_LINE_SIZE;
986 break;
987 case 0x0DUL:
988 /* Data cache line size */
989 T0 = DCACHE_LINE_SIZE;
990 break;
991 case 0x0EUL:
992 /* Minimum cache line size */
993 T0 = ICACHE_LINE_SIZE < DCACHE_LINE_SIZE ?
994 ICACHE_LINE_SIZE : DCACHE_LINE_SIZE;
995 break;
996 case 0x0FUL:
997 /* Maximum cache line size */
998 T0 = ICACHE_LINE_SIZE > DCACHE_LINE_SIZE ?
999 ICACHE_LINE_SIZE : DCACHE_LINE_SIZE;
1000 break;
1001 default:
1002 /* Undefined */
1003 break;
1004 }
1005 }
1006
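/* POWER div/divo divide the 64-bit quantity (RA << 32) | MQ by RB: the
 * quotient goes to the destination register and the remainder is left in MQ.
 */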
1007 void do_POWER_div (void)
1008 {
1009 uint64_t tmp;
1010
1011 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1012 T0 = (long)((-1) * (T0 >> 31));
1013 env->spr[SPR_MQ] = 0;
1014 } else {
1015 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1016 env->spr[SPR_MQ] = tmp % T1;
1017 T0 = tmp / (int32_t)T1;
1018 }
1019 }
1020
1021 void do_POWER_divo (void)
1022 {
1023 int64_t tmp;
1024
1025 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1026 T0 = (long)((-1) * (T0 >> 31));
1027 env->spr[SPR_MQ] = 0;
1028 xer_ov = 1;
1029 xer_so = 1;
1030 } else {
1031 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1032 env->spr[SPR_MQ] = tmp % T1;
1033 tmp /= (int32_t)T1;
1034 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1035 xer_ov = 1;
1036 xer_so = 1;
1037 } else {
1038 xer_ov = 0;
1039 }
1040 T0 = tmp;
1041 }
1042 }
1043
1044 void do_POWER_divs (void)
1045 {
1046 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1047 T0 = (long)((-1) * (T0 >> 31));
1048 env->spr[SPR_MQ] = 0;
1049 } else {
1050 env->spr[SPR_MQ] = T0 % T1;
1051 T0 = (int32_t)T0 / (int32_t)T1;
1052 }
1053 }
1054
1055 void do_POWER_divso (void)
1056 {
1057 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1058 T0 = (long)((-1) * (T0 >> 31));
1059 env->spr[SPR_MQ] = 0;
1060 xer_ov = 1;
1061 xer_so = 1;
1062 } else {
1063 T0 = (int32_t)T0 / (int32_t)T1;
1064 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1065 xer_ov = 0;
1066 }
1067 }
1068
1069 void do_POWER_dozo (void)
1070 {
1071 if ((int32_t)T1 > (int32_t)T0) {
1072 T2 = T0;
1073 T0 = T1 - T0;
1074 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1075 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1076 xer_ov = 1;
1077 xer_so = 1;
1078 } else {
1079 xer_ov = 0;
1080 }
1081 } else {
1082 T0 = 0;
1083 xer_ov = 0;
1084 }
1085 }
1086
1087 void do_POWER_maskg (void)
1088 {
1089 uint32_t ret;
1090
1091 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1092 ret = -1;
1093 } else {
1094 ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
1095 (((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
1096 if ((uint32_t)T0 > (uint32_t)T1)
1097 ret = ~ret;
1098 }
1099 T0 = ret;
1100 }
1101
1102 void do_POWER_mulo (void)
1103 {
1104 uint64_t tmp;
1105
1106 tmp = (uint64_t)T0 * (uint64_t)T1;
1107 env->spr[SPR_MQ] = tmp >> 32;
1108 T0 = tmp;
1109 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1110 xer_ov = 1;
1111 xer_so = 1;
1112 } else {
1113 xer_ov = 0;
1114 }
1115 }
1116
1117 #if !defined (CONFIG_USER_ONLY)
1118 void do_POWER_rac (void)
1119 {
1120 #if 0
1121 mmu_ctx_t ctx;
1122
1123 /* We don't have to generate many instances of this instruction,
1124 * as rac is supervisor only.
1125 */
1126 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT, 1) == 0)
1127 T0 = ctx.raddr;
1128 #endif
1129 }
1130
1131 void do_POWER_rfsvc (void)
1132 {
1133 env->nip = env->lr & ~0x00000003UL;
1134 T0 = env->ctr & 0x0000FFFFUL;
1135 do_store_msr(env, T0);
1136 #if defined (DEBUG_OP)
1137 cpu_dump_rfi(env->nip, do_load_msr(env));
1138 #endif
1139 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1140 }
1141
1142 /* PowerPC 601 BAT management helper */
1143 void do_store_601_batu (int nr)
1144 {
1145 do_store_ibatu(env, nr, (uint32_t)T0);
1146 env->DBAT[0][nr] = env->IBAT[0][nr];
1147 env->DBAT[1][nr] = env->IBAT[1][nr];
1148 }
1149 #endif
1150
1151 /*****************************************************************************/
1152 /* 602 specific instructions */
1153 /* mfrom is the most crazy instruction ever seen, imho ! */
1154 /* Real implementation uses a ROM table. Do the same */
1155 #define USE_MFROM_ROM_TABLE
1156 void do_op_602_mfrom (void)
1157 {
1158 if (likely(T0 < 602)) {
1159 #if defined(USE_MFROM_ROM_TABLE)
1160 #include "mfrom_table.c"
1161 T0 = mfrom_ROM_table[T0];
1162 #else
1163 double d;
1164 /* Extremly decomposed:
1165 * -T0 / 256
1166 * T0 = 256 * log10(10 + 1.0) + 0.5
1167 */
1168 d = T0;
1169 d = float64_div(d, 256, &env->fp_status);
1170 d = float64_chs(d);
1171 d = exp10(d); // XXX: use float emulation function
1172 d = float64_add(d, 1.0, &env->fp_status);
1173 d = log10(d); // XXX: use float emulation function
1174 d = float64_mul(d, 256, &env->fp_status);
1175 d = float64_add(d, 0.5, &env->fp_status);
1176 T0 = float64_round_to_int(d, &env->fp_status);
1177 #endif
1178 } else {
1179 T0 = 0;
1180 }
1181 }
1182
1183 /*****************************************************************************/
1184 /* Embedded PowerPC specific helpers */
1185 void do_405_check_ov (void)
1186 {
1187 if (likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1188 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1189 xer_ov = 0;
1190 } else {
1191 xer_ov = 1;
1192 xer_so = 1;
1193 }
1194 }
1195
1196 void do_405_check_sat (void)
1197 {
1198 if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1199 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1200 /* Saturate result */
1201 if (T2 >> 31) {
1202 T0 = INT32_MIN;
1203 } else {
1204 T0 = INT32_MAX;
1205 }
1206 }
1207 }
1208
1209 /* XXX: to be improved to check access rights when in user-mode */
1210 void do_load_dcr (void)
1211 {
1212 target_ulong val;
1213
1214 if (unlikely(env->dcr_env == NULL)) {
1215 if (loglevel != 0) {
1216 fprintf(logfile, "No DCR environment\n");
1217 }
1218 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1219 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1220 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1221 if (loglevel != 0) {
1222 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1223 }
1224 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1225 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1226 } else {
1227 T0 = val;
1228 }
1229 }
1230
1231 void do_store_dcr (void)
1232 {
1233 if (unlikely(env->dcr_env == NULL)) {
1234 if (loglevel != 0) {
1235 fprintf(logfile, "No DCR environment\n");
1236 }
1237 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1238 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1239 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1240 if (loglevel != 0) {
1241 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1242 }
1243 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1244 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1245 }
1246 }
1247
1248 #if !defined(CONFIG_USER_ONLY)
1249 void do_40x_rfci (void)
1250 {
1251 env->nip = env->spr[SPR_40x_SRR2];
1252 do_store_msr(env, env->spr[SPR_40x_SRR3] & ~0xFFFF0000);
1253 #if defined (DEBUG_OP)
1254 cpu_dump_rfi(env->nip, do_load_msr(env));
1255 #endif
1256     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1257 }
1258
1259 void do_rfci (void)
1260 {
1261 #if defined(TARGET_PPC64)
1262 if (env->spr[SPR_BOOKE_CSRR1] & (1 << MSR_CM)) {
1263 env->nip = (uint64_t)env->spr[SPR_BOOKE_CSRR0];
1264 } else
1265 #endif
1266 {
1267 env->nip = (uint32_t)env->spr[SPR_BOOKE_CSRR0];
1268 }
1269 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_CSRR1] & ~0x3FFF0000);
1270 #if defined (DEBUG_OP)
1271 cpu_dump_rfi(env->nip, do_load_msr(env));
1272 #endif
1273     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1274 }
1275
1276 void do_rfdi (void)
1277 {
1278 #if defined(TARGET_PPC64)
1279 if (env->spr[SPR_BOOKE_DSRR1] & (1 << MSR_CM)) {
1280 env->nip = (uint64_t)env->spr[SPR_BOOKE_DSRR0];
1281 } else
1282 #endif
1283 {
1284 env->nip = (uint32_t)env->spr[SPR_BOOKE_DSRR0];
1285 }
1286 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_DSRR1] & ~0x3FFF0000);
1287 #if defined (DEBUG_OP)
1288 cpu_dump_rfi(env->nip, do_load_msr(env));
1289 #endif
1290     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1291 }
1292
1293 void do_rfmci (void)
1294 {
1295 #if defined(TARGET_PPC64)
1296 if (env->spr[SPR_BOOKE_MCSRR1] & (1 << MSR_CM)) {
1297 env->nip = (uint64_t)env->spr[SPR_BOOKE_MCSRR0];
1298 } else
1299 #endif
1300 {
1301 env->nip = (uint32_t)env->spr[SPR_BOOKE_MCSRR0];
1302 }
1303 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_MCSRR1] & ~0x3FFF0000);
1304 #if defined (DEBUG_OP)
1305 cpu_dump_rfi(env->nip, do_load_msr(env));
1306 #endif
1307     env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1308 }
1309
1310 void do_load_403_pb (int num)
1311 {
1312 T0 = env->pb[num];
1313 }
1314
1315 void do_store_403_pb (int num)
1316 {
1317 if (likely(env->pb[num] != T0)) {
1318 env->pb[num] = T0;
1319 /* Should be optimized */
1320 tlb_flush(env, 1);
1321 }
1322 }
1323 #endif
1324
1325 /* 440 specific */
1326 void do_440_dlmzb (void)
1327 {
1328 target_ulong mask;
1329 int i;
1330
1331 i = 1;
1332 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1333 if ((T0 & mask) == 0)
1334 goto done;
1335 i++;
1336 }
1337 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1338 if ((T1 & mask) == 0)
1339 break;
1340 i++;
1341 }
1342 done:
1343 T0 = i;
1344 }
1345
1346 #if defined(TARGET_PPCEMB)
1347 /* SPE extension helpers */
1348 /* Use a table to make this quicker */
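/* hbrev[n] is n with its four bits reversed; byte_reverse() and word_reverse()
 * compose it into full bit reversals of a byte and of a 32-bit word.
 */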
1349 static uint8_t hbrev[16] = {
1350 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1351 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1352 };
1353
1354 static inline uint8_t byte_reverse (uint8_t val)
1355 {
1356 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1357 }
1358
1359 static inline uint32_t word_reverse (uint32_t val)
1360 {
1361 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1362 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1363 }
1364
1365 #define MASKBITS 16 // Random value - to be fixed
1366 void do_brinc (void)
1367 {
1368 uint32_t a, b, d, mask;
1369
1370 mask = (uint32_t)(-1UL) >> MASKBITS;
1371 b = T1_64 & mask;
1372 a = T0_64 & mask;
1373 d = word_reverse(1 + word_reverse(a | ~mask));
1374 T0_64 = (T0_64 & ~mask) | (d & mask);
1375 }
1376
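/* The SPE ev* helpers treat the 64-bit GPR as a vector of two 32-bit elements
 * and apply the scalar _do_e* operation to the high and low halves
 * independently.
 */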
1377 #define DO_SPE_OP2(name) \
1378 void do_ev##name (void) \
1379 { \
1380 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1381 (uint64_t)_do_e##name(T0_64, T1_64); \
1382 }
1383
1384 #define DO_SPE_OP1(name) \
1385 void do_ev##name (void) \
1386 { \
1387 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1388 (uint64_t)_do_e##name(T0_64); \
1389 }
1390
1391 /* Fixed-point vector arithmetic */
1392 static inline uint32_t _do_eabs (uint32_t val)
1393 {
1394 if (val != 0x80000000)
1395 val &= ~0x80000000;
1396
1397 return val;
1398 }
1399
1400 static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
1401 {
1402 return op1 + op2;
1403 }
1404
1405 static inline int _do_ecntlsw (uint32_t val)
1406 {
1407 if (val & 0x80000000)
1408 return _do_cntlzw(~val);
1409 else
1410 return _do_cntlzw(val);
1411 }
1412
1413 static inline int _do_ecntlzw (uint32_t val)
1414 {
1415 return _do_cntlzw(val);
1416 }
1417
1418 static inline uint32_t _do_eneg (uint32_t val)
1419 {
1420 if (val != 0x80000000)
1421 val ^= 0x80000000;
1422
1423 return val;
1424 }
1425
1426 static inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
1427 {
1428 return rotl32(op1, op2);
1429 }
1430
1431 static inline uint32_t _do_erndw (uint32_t val)
1432 {
1433 return (val + 0x000080000000) & 0xFFFF0000;
1434 }
1435
1436 static inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
1437 {
1438 /* No error here: 6 bits are used */
1439 return op1 << (op2 & 0x3F);
1440 }
1441
1442 static inline int32_t _do_esrws (int32_t op1, uint32_t op2)
1443 {
1444 /* No error here: 6 bits are used */
1445 return op1 >> (op2 & 0x3F);
1446 }
1447
1448 static inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
1449 {
1450 /* No error here: 6 bits are used */
1451 return op1 >> (op2 & 0x3F);
1452 }
1453
1454 static inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
1455 {
1456 return op2 - op1;
1457 }
1458
1459 /* evabs */
1460 DO_SPE_OP1(abs);
1461 /* evaddw */
1462 DO_SPE_OP2(addw);
1463 /* evcntlsw */
1464 DO_SPE_OP1(cntlsw);
1465 /* evcntlzw */
1466 DO_SPE_OP1(cntlzw);
1467 /* evneg */
1468 DO_SPE_OP1(neg);
1469 /* evrlw */
1470 DO_SPE_OP2(rlw);
1471 /* evrnd */
1472 DO_SPE_OP1(rndw);
1473 /* evslw */
1474 DO_SPE_OP2(slw);
1475 /* evsrws */
1476 DO_SPE_OP2(srws);
1477 /* evsrwu */
1478 DO_SPE_OP2(srwu);
1479 /* evsubfw */
1480 DO_SPE_OP2(subfw);
1481
1482 /* evsel is a little bit more complicated... */
1483 static inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
1484 {
1485 if (n)
1486 return op1;
1487 else
1488 return op2;
1489 }
1490
1491 void do_evsel (void)
1492 {
1493 T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
1494 (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
1495 }
1496
1497 /* Fixed-point vector comparisons */
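/* Each vector compare merges the per-half results into a 4-bit value:
 * bit 3 = high-half result, bit 2 = low-half result, bit 1 = OR of both,
 * bit 0 = AND of both.
 */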
1498 #define DO_SPE_CMP(name)                                                      \
1499 void do_ev##name (void)                                                       \
1500 {                                                                             \
1501     T0 = _do_evcmp_merge(_do_e##name(T0_64 >> 32,                             \
1502                                      T1_64 >> 32),                            \
1503                          _do_e##name(T0_64, T1_64));                          \
1504 }
1505
1506 static inline uint32_t _do_evcmp_merge (int t0, int t1)
1507 {
1508 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1509 }
1510 static inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
1511 {
1512 return op1 == op2 ? 1 : 0;
1513 }
1514
1515 static inline int _do_ecmpgts (int32_t op1, int32_t op2)
1516 {
1517 return op1 > op2 ? 1 : 0;
1518 }
1519
1520 static inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
1521 {
1522 return op1 > op2 ? 1 : 0;
1523 }
1524
1525 static inline int _do_ecmplts (int32_t op1, int32_t op2)
1526 {
1527 return op1 < op2 ? 1 : 0;
1528 }
1529
1530 static inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
1531 {
1532 return op1 < op2 ? 1 : 0;
1533 }
1534
1535 /* evcmpeq */
1536 DO_SPE_CMP(cmpeq);
1537 /* evcmpgts */
1538 DO_SPE_CMP(cmpgts);
1539 /* evcmpgtu */
1540 DO_SPE_CMP(cmpgtu);
1541 /* evcmplts */
1542 DO_SPE_CMP(cmplts);
1543 /* evcmpltu */
1544 DO_SPE_CMP(cmpltu);
1545
1546 /* Single precision floating-point conversions from/to integer */
1547 static inline uint32_t _do_efscfsi (int32_t val)
1548 {
1549 union {
1550 uint32_t u;
1551 float32 f;
1552 } u;
1553
1554 u.f = int32_to_float32(val, &env->spe_status);
1555
1556 return u.u;
1557 }
1558
1559 static inline uint32_t _do_efscfui (uint32_t val)
1560 {
1561 union {
1562 uint32_t u;
1563 float32 f;
1564 } u;
1565
1566 u.f = uint32_to_float32(val, &env->spe_status);
1567
1568 return u.u;
1569 }
1570
1571 static inline int32_t _do_efsctsi (uint32_t val)
1572 {
1573 union {
1574 int32_t u;
1575 float32 f;
1576 } u;
1577
1578 u.u = val;
1579     /* NaNs are not treated the way IEEE 754 specifies */
1580 if (unlikely(isnan(u.f)))
1581 return 0;
1582
1583 return float32_to_int32(u.f, &env->spe_status);
1584 }
1585
1586 static inline uint32_t _do_efsctui (uint32_t val)
1587 {
1588 union {
1589 int32_t u;
1590 float32 f;
1591 } u;
1592
1593 u.u = val;
1594     /* NaNs are not treated the way IEEE 754 specifies */
1595 if (unlikely(isnan(u.f)))
1596 return 0;
1597
1598 return float32_to_uint32(u.f, &env->spe_status);
1599 }
1600
1601 static inline int32_t _do_efsctsiz (uint32_t val)
1602 {
1603 union {
1604 int32_t u;
1605 float32 f;
1606 } u;
1607
1608 u.u = val;
1609     /* NaNs are not treated the way IEEE 754 specifies */
1610 if (unlikely(isnan(u.f)))
1611 return 0;
1612
1613 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1614 }
1615
1616 static inline uint32_t _do_efsctuiz (uint32_t val)
1617 {
1618 union {
1619 int32_t u;
1620 float32 f;
1621 } u;
1622
1623 u.u = val;
1624     /* NaNs are not treated the way IEEE 754 specifies */
1625 if (unlikely(isnan(u.f)))
1626 return 0;
1627
1628 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1629 }
1630
1631 void do_efscfsi (void)
1632 {
1633 T0_64 = _do_efscfsi(T0_64);
1634 }
1635
1636 void do_efscfui (void)
1637 {
1638 T0_64 = _do_efscfui(T0_64);
1639 }
1640
1641 void do_efsctsi (void)
1642 {
1643 T0_64 = _do_efsctsi(T0_64);
1644 }
1645
1646 void do_efsctui (void)
1647 {
1648 T0_64 = _do_efsctui(T0_64);
1649 }
1650
1651 void do_efsctsiz (void)
1652 {
1653 T0_64 = _do_efsctsiz(T0_64);
1654 }
1655
1656 void do_efsctuiz (void)
1657 {
1658 T0_64 = _do_efsctuiz(T0_64);
1659 }
1660
1661 /* Single precision floating-point conversion to/from fractional */
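/* Fractional operands are the integer value scaled by 2^32: convert-from
 * divides the converted integer by 2^32, convert-to multiplies by 2^32 before
 * the final integer conversion.
 */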
1662 static inline uint32_t _do_efscfsf (uint32_t val)
1663 {
1664 union {
1665 uint32_t u;
1666 float32 f;
1667 } u;
1668 float32 tmp;
1669
1670 u.f = int32_to_float32(val, &env->spe_status);
1671 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
1672 u.f = float32_div(u.f, tmp, &env->spe_status);
1673
1674 return u.u;
1675 }
1676
1677 static inline uint32_t _do_efscfuf (uint32_t val)
1678 {
1679 union {
1680 uint32_t u;
1681 float32 f;
1682 } u;
1683 float32 tmp;
1684
1685 u.f = uint32_to_float32(val, &env->spe_status);
1686 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1687 u.f = float32_div(u.f, tmp, &env->spe_status);
1688
1689 return u.u;
1690 }
1691
1692 static inline int32_t _do_efsctsf (uint32_t val)
1693 {
1694 union {
1695 int32_t u;
1696 float32 f;
1697 } u;
1698 float32 tmp;
1699
1700 u.u = val;
1701     /* NaNs are not treated the way IEEE 754 specifies */
1702 if (unlikely(isnan(u.f)))
1703 return 0;
1704 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1705 u.f = float32_mul(u.f, tmp, &env->spe_status);
1706
1707 return float32_to_int32(u.f, &env->spe_status);
1708 }
1709
1710 static inline uint32_t _do_efsctuf (uint32_t val)
1711 {
1712 union {
1713 int32_t u;
1714 float32 f;
1715 } u;
1716 float32 tmp;
1717
1718 u.u = val;
1719     /* NaNs are not treated the way IEEE 754 specifies */
1720 if (unlikely(isnan(u.f)))
1721 return 0;
1722 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1723 u.f = float32_mul(u.f, tmp, &env->spe_status);
1724
1725 return float32_to_uint32(u.f, &env->spe_status);
1726 }
1727
1728 static inline int32_t _do_efsctsfz (uint32_t val)
1729 {
1730 union {
1731 int32_t u;
1732 float32 f;
1733 } u;
1734 float32 tmp;
1735
1736 u.u = val;
1737     /* NaNs are not treated the way IEEE 754 specifies */
1738 if (unlikely(isnan(u.f)))
1739 return 0;
1740 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1741 u.f = float32_mul(u.f, tmp, &env->spe_status);
1742
1743 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1744 }
1745
1746 static inline uint32_t _do_efsctufz (uint32_t val)
1747 {
1748 union {
1749 int32_t u;
1750 float32 f;
1751 } u;
1752 float32 tmp;
1753
1754 u.u = val;
1755     /* NaNs are not treated the way IEEE 754 specifies */
1756 if (unlikely(isnan(u.f)))
1757 return 0;
1758 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1759 u.f = float32_mul(u.f, tmp, &env->spe_status);
1760
1761 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1762 }
1763
1764 void do_efscfsf (void)
1765 {
1766 T0_64 = _do_efscfsf(T0_64);
1767 }
1768
1769 void do_efscfuf (void)
1770 {
1771 T0_64 = _do_efscfuf(T0_64);
1772 }
1773
1774 void do_efsctsf (void)
1775 {
1776 T0_64 = _do_efsctsf(T0_64);
1777 }
1778
1779 void do_efsctuf (void)
1780 {
1781 T0_64 = _do_efsctuf(T0_64);
1782 }
1783
1784 void do_efsctsfz (void)
1785 {
1786 T0_64 = _do_efsctsfz(T0_64);
1787 }
1788
1789 void do_efsctufz (void)
1790 {
1791 T0_64 = _do_efsctufz(T0_64);
1792 }
1793
1794 /* Double precision floating point helpers */
1795 static inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
1796 {
1797     /* XXX: TODO: test special values (NaN, infinities, ...) */
1798 return _do_efdtstlt(op1, op2);
1799 }
1800
1801 static inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
1802 {
1803     /* XXX: TODO: test special values (NaN, infinities, ...) */
1804 return _do_efdtstgt(op1, op2);
1805 }
1806
1807 static inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
1808 {
1809     /* XXX: TODO: test special values (NaN, infinities, ...) */
1810 return _do_efdtsteq(op1, op2);
1811 }
1812
1813 void do_efdcmplt (void)
1814 {
1815 T0 = _do_efdcmplt(T0_64, T1_64);
1816 }
1817
1818 void do_efdcmpgt (void)
1819 {
1820 T0 = _do_efdcmpgt(T0_64, T1_64);
1821 }
1822
1823 void do_efdcmpeq (void)
1824 {
1825 T0 = _do_efdcmpeq(T0_64, T1_64);
1826 }
1827
1828 /* Double precision floating-point conversion to/from integer */
1829 static inline uint64_t _do_efdcfsi (int64_t val)
1830 {
1831 union {
1832 uint64_t u;
1833 float64 f;
1834 } u;
1835
1836 u.f = int64_to_float64(val, &env->spe_status);
1837
1838 return u.u;
1839 }
1840
1841 static inline uint64_t _do_efdcfui (uint64_t val)
1842 {
1843 union {
1844 uint64_t u;
1845 float64 f;
1846 } u;
1847
1848 u.f = uint64_to_float64(val, &env->spe_status);
1849
1850 return u.u;
1851 }
1852
1853 static inline int64_t _do_efdctsi (uint64_t val)
1854 {
1855 union {
1856 int64_t u;
1857 float64 f;
1858 } u;
1859
1860 u.u = val;
1861     /* NaNs are not treated the way IEEE 754 specifies */
1862 if (unlikely(isnan(u.f)))
1863 return 0;
1864
1865 return float64_to_int64(u.f, &env->spe_status);
1866 }
1867
1868 static inline uint64_t _do_efdctui (uint64_t val)
1869 {
1870 union {
1871 int64_t u;
1872 float64 f;
1873 } u;
1874
1875 u.u = val;
1876     /* NaNs are not treated the way IEEE 754 specifies */
1877 if (unlikely(isnan(u.f)))
1878 return 0;
1879
1880 return float64_to_uint64(u.f, &env->spe_status);
1881 }
1882
1883 static inline int64_t _do_efdctsiz (uint64_t val)
1884 {
1885 union {
1886 int64_t u;
1887 float64 f;
1888 } u;
1889
1890 u.u = val;
1891     /* NaNs are not treated the way IEEE 754 specifies */
1892 if (unlikely(isnan(u.f)))
1893 return 0;
1894
1895 return float64_to_int64_round_to_zero(u.f, &env->spe_status);
1896 }
1897
1898 static inline uint64_t _do_efdctuiz (uint64_t val)
1899 {
1900 union {
1901 int64_t u;
1902 float64 f;
1903 } u;
1904
1905 u.u = val;
1906     /* NaNs are not treated the way IEEE 754 specifies */
1907 if (unlikely(isnan(u.f)))
1908 return 0;
1909
1910 return float64_to_uint64_round_to_zero(u.f, &env->spe_status);
1911 }
1912
1913 void do_efdcfsi (void)
1914 {
1915 T0_64 = _do_efdcfsi(T0_64);
1916 }
1917
1918 void do_efdcfui (void)
1919 {
1920 T0_64 = _do_efdcfui(T0_64);
1921 }
1922
1923 void do_efdctsi (void)
1924 {
1925 T0_64 = _do_efdctsi(T0_64);
1926 }
1927
1928 void do_efdctui (void)
1929 {
1930 T0_64 = _do_efdctui(T0_64);
1931 }
1932
1933 void do_efdctsiz (void)
1934 {
1935 T0_64 = _do_efdctsiz(T0_64);
1936 }
1937
1938 void do_efdctuiz (void)
1939 {
1940 T0_64 = _do_efdctuiz(T0_64);
1941 }
1942
1943 /* Double precision floating-point conversion to/from fractional */
1944 static inline uint64_t _do_efdcfsf (int64_t val)
1945 {
1946 union {
1947 uint64_t u;
1948 float64 f;
1949 } u;
1950 float64 tmp;
1951
1952 u.f = int32_to_float64(val, &env->spe_status);
1953 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
1954 u.f = float64_div(u.f, tmp, &env->spe_status);
1955
1956 return u.u;
1957 }
1958
1959 static inline uint64_t _do_efdcfuf (uint64_t val)
1960 {
1961 union {
1962 uint64_t u;
1963 float64 f;
1964 } u;
1965 float64 tmp;
1966
1967 u.f = uint32_to_float64(val, &env->spe_status);
1968 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
1969 u.f = float64_div(u.f, tmp, &env->spe_status);
1970
1971 return u.u;
1972 }
1973
1974 static inline int64_t _do_efdctsf (uint64_t val)
1975 {
1976 union {
1977 int64_t u;
1978 float64 f;
1979 } u;
1980 float64 tmp;
1981
1982 u.u = val;
1983     /* NaNs are not treated the way IEEE 754 specifies */
1984 if (unlikely(isnan(u.f)))
1985 return 0;
1986 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
1987 u.f = float64_mul(u.f, tmp, &env->spe_status);
1988
1989 return float64_to_int32(u.f, &env->spe_status);
1990 }
1991
1992 static inline uint64_t _do_efdctuf (uint64_t val)
1993 {
1994 union {
1995 int64_t u;
1996 float64 f;
1997 } u;
1998 float64 tmp;
1999
2000 u.u = val;
2001     /* NaNs are not treated the way IEEE 754 specifies */
2002 if (unlikely(isnan(u.f)))
2003 return 0;
2004 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2005 u.f = float64_mul(u.f, tmp, &env->spe_status);
2006
2007 return float64_to_uint32(u.f, &env->spe_status);
2008 }
2009
2010 static inline int64_t _do_efdctsfz (uint64_t val)
2011 {
2012 union {
2013 int64_t u;
2014 float64 f;
2015 } u;
2016 float64 tmp;
2017
2018 u.u = val;
2019     /* NaNs are not treated the way IEEE 754 specifies */
2020 if (unlikely(isnan(u.f)))
2021 return 0;
2022 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2023 u.f = float64_mul(u.f, tmp, &env->spe_status);
2024
2025 return float64_to_int32_round_to_zero(u.f, &env->spe_status);
2026 }
2027
2028 static inline uint64_t _do_efdctufz (uint64_t val)
2029 {
2030 union {
2031 int64_t u;
2032 float64 f;
2033 } u;
2034 float64 tmp;
2035
2036 u.u = val;
2037     /* NaNs are not treated the way IEEE 754 specifies */
2038 if (unlikely(isnan(u.f)))
2039 return 0;
2040 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2041 u.f = float64_mul(u.f, tmp, &env->spe_status);
2042
2043 return float64_to_uint32_round_to_zero(u.f, &env->spe_status);
2044 }
2045
2046 void do_efdcfsf (void)
2047 {
2048 T0_64 = _do_efdcfsf(T0_64);
2049 }
2050
2051 void do_efdcfuf (void)
2052 {
2053 T0_64 = _do_efdcfuf(T0_64);
2054 }
2055
2056 void do_efdctsf (void)
2057 {
2058 T0_64 = _do_efdctsf(T0_64);
2059 }
2060
2061 void do_efdctuf (void)
2062 {
2063 T0_64 = _do_efdctuf(T0_64);
2064 }
2065
2066 void do_efdctsfz (void)
2067 {
2068 T0_64 = _do_efdctsfz(T0_64);
2069 }
2070
2071 void do_efdctufz (void)
2072 {
2073 T0_64 = _do_efdctufz(T0_64);
2074 }
2075
2076 /* Floating point conversion between single and double precision */
2077 static inline uint32_t _do_efscfd (uint64_t val)
2078 {
2079 union {
2080 uint64_t u;
2081 float64 f;
2082 } u1;
2083 union {
2084 uint32_t u;
2085 float32 f;
2086 } u2;
2087
2088 u1.u = val;
2089 u2.f = float64_to_float32(u1.f, &env->spe_status);
2090
2091 return u2.u;
2092 }
2093
2094 static inline uint64_t _do_efdcfs (uint32_t val)
2095 {
2096 union {
2097 uint64_t u;
2098 float64 f;
2099 } u2;
2100 union {
2101 uint32_t u;
2102 float32 f;
2103 } u1;
2104
2105 u1.u = val;
2106 u2.f = float32_to_float64(u1.f, &env->spe_status);
2107
2108 return u2.u;
2109 }
2110
2111 void do_efscfd (void)
2112 {
2113 T0_64 = _do_efscfd(T0_64);
2114 }
2115
2116 void do_efdcfs (void)
2117 {
2118 T0_64 = _do_efdcfs(T0_64);
2119 }
2120
2121 /* Single precision fixed-point vector arithmetic */
2122 /* evfsabs */
2123 DO_SPE_OP1(fsabs);
2124 /* evfsnabs */
2125 DO_SPE_OP1(fsnabs);
2126 /* evfsneg */
2127 DO_SPE_OP1(fsneg);
2128 /* evfsadd */
2129 DO_SPE_OP2(fsadd);
2130 /* evfssub */
2131 DO_SPE_OP2(fssub);
2132 /* evfsmul */
2133 DO_SPE_OP2(fsmul);
2134 /* evfsdiv */
2135 DO_SPE_OP2(fsdiv);
2136
2137 /* Single-precision floating-point comparisons */
2138 static inline int _do_efscmplt (uint32_t op1, uint32_t op2)
2139 {
2140     /* XXX: TODO: test special values (NaN, infinities, ...) */
2141 return _do_efststlt(op1, op2);
2142 }
2143
2144 static inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
2145 {
2146     /* XXX: TODO: test special values (NaN, infinities, ...) */
2147 return _do_efststgt(op1, op2);
2148 }
2149
2150 static inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
2151 {
2152     /* XXX: TODO: test special values (NaN, infinities, ...) */
2153 return _do_efststeq(op1, op2);
2154 }
2155
2156 void do_efscmplt (void)
2157 {
2158 T0 = _do_efscmplt(T0_64, T1_64);
2159 }
2160
2161 void do_efscmpgt (void)
2162 {
2163 T0 = _do_efscmpgt(T0_64, T1_64);
2164 }
2165
2166 void do_efscmpeq (void)
2167 {
2168 T0 = _do_efscmpeq(T0_64, T1_64);
2169 }
2170
2171 /* Single-precision floating-point vector comparisons */
2172 /* evfscmplt */
2173 DO_SPE_CMP(fscmplt);
2174 /* evfscmpgt */
2175 DO_SPE_CMP(fscmpgt);
2176 /* evfscmpeq */
2177 DO_SPE_CMP(fscmpeq);
2178 /* evfststlt */
2179 DO_SPE_CMP(fststlt);
2180 /* evfststgt */
2181 DO_SPE_CMP(fststgt);
2182 /* evfststeq */
2183 DO_SPE_CMP(fststeq);
2184
2185 /* Single-precision floating-point vector conversions */
2186 /* evfscfsi */
2187 DO_SPE_OP1(fscfsi);
2188 /* evfscfui */
2189 DO_SPE_OP1(fscfui);
2190 /* evfscfuf */
2191 DO_SPE_OP1(fscfuf);
2192 /* evfscfsf */
2193 DO_SPE_OP1(fscfsf);
2194 /* evfsctsi */
2195 DO_SPE_OP1(fsctsi);
2196 /* evfsctui */
2197 DO_SPE_OP1(fsctui);
2198 /* evfsctsiz */
2199 DO_SPE_OP1(fsctsiz);
2200 /* evfsctuiz */
2201 DO_SPE_OP1(fsctuiz);
2202 /* evfsctsf */
2203 DO_SPE_OP1(fsctsf);
2204 /* evfsctuf */
2205 DO_SPE_OP1(fsctuf);
2206 #endif /* defined(TARGET_PPCEMB) */
2207
2208 /*****************************************************************************/
2209 /* Softmmu support */
2210 #if !defined (CONFIG_USER_ONLY)
2211
2212 #define MMUSUFFIX _mmu
2213 #define GETPC() (__builtin_return_address(0))
2214
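/* Each inclusion of softmmu_template.h instantiates the slow-path load/store
 * helpers for an access size of 1 << SHIFT bytes (1, 2, 4 and 8).
 */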
2215 #define SHIFT 0
2216 #include "softmmu_template.h"
2217
2218 #define SHIFT 1
2219 #include "softmmu_template.h"
2220
2221 #define SHIFT 2
2222 #include "softmmu_template.h"
2223
2224 #define SHIFT 3
2225 #include "softmmu_template.h"
2226
2227 /* try to fill the TLB and return an exception if error. If retaddr is
2228 NULL, it means that the function was called in C code (i.e. not
2229 from generated code or from helper.c) */
2230 /* XXX: fix it to restore all registers */
2231 void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
2232 {
2233 TranslationBlock *tb;
2234 CPUState *saved_env;
2235 target_phys_addr_t pc;
2236 int ret;
2237
2238 /* XXX: hack to restore env in all cases, even if not called from
2239 generated code */
2240 saved_env = env;
2241 env = cpu_single_env;
2242 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, is_user, 1);
2243 if (unlikely(ret != 0)) {
2244 if (likely(retaddr)) {
2245 /* now we have a real cpu fault */
2246 pc = (target_phys_addr_t)(unsigned long)retaddr;
2247 tb = tb_find_pc(pc);
2248 if (likely(tb)) {
2249 /* the PC is inside the translated code. It means that we have
2250 a virtual CPU fault */
2251 cpu_restore_state(tb, env, pc, NULL);
2252 }
2253 }
2254 do_raise_exception_err(env->exception_index, env->error_code);
2255 }
2256 env = saved_env;
2257 }
2258
2259 /* TLB invalidation helpers */
2260 void do_tlbia (void)
2261 {
2262 ppc_tlb_invalidate_all(env);
2263 }
2264
2265 void do_tlbie (void)
2266 {
2267 T0 = (uint32_t)T0;
2268 #if !defined(FLUSH_ALL_TLBS)
2269     /* XXX: Remove those tests */
2270 if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx)) {
2271 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
2272 if (env->id_tlbs == 1)
2273 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
2274 } else if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_4xx)) {
2275 ppc4xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
2276 env->spr[SPR_40x_PID]);
2277 } else {
2278         /* tlbie invalidates TLBs for all segments */
2279 T0 &= TARGET_PAGE_MASK;
2280 T0 &= ~((target_ulong)-1 << 28);
2281 /* XXX: this case should be optimized,
2282 * giving a mask to tlb_flush_page
2283 */
2284 tlb_flush_page(env, T0 | (0x0 << 28));
2285 tlb_flush_page(env, T0 | (0x1 << 28));
2286 tlb_flush_page(env, T0 | (0x2 << 28));
2287 tlb_flush_page(env, T0 | (0x3 << 28));
2288 tlb_flush_page(env, T0 | (0x4 << 28));
2289 tlb_flush_page(env, T0 | (0x5 << 28));
2290 tlb_flush_page(env, T0 | (0x6 << 28));
2291 tlb_flush_page(env, T0 | (0x7 << 28));
2292 tlb_flush_page(env, T0 | (0x8 << 28));
2293 tlb_flush_page(env, T0 | (0x9 << 28));
2294 tlb_flush_page(env, T0 | (0xA << 28));
2295 tlb_flush_page(env, T0 | (0xB << 28));
2296 tlb_flush_page(env, T0 | (0xC << 28));
2297 tlb_flush_page(env, T0 | (0xD << 28));
2298 tlb_flush_page(env, T0 | (0xE << 28));
2299 tlb_flush_page(env, T0 | (0xF << 28));
2300 }
2301 #else
2302 do_tlbia();
2303 #endif
2304 }
2305
2306 #if defined(TARGET_PPC64)
2307 void do_tlbie_64 (void)
2308 {
2309 T0 = (uint64_t)T0;
2310 #if !defined(FLUSH_ALL_TLBS)
2311 if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_6xx)) {
2312 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
2313 if (env->id_tlbs == 1)
2314 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
2315 } else if (unlikely(env->mmu_model == POWERPC_MMU_SOFT_4xx)) {
2316 /* XXX: TODO */
2317 #if 0
2318 ppcbooke_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
2319 env->spr[SPR_BOOKE_PID]);
2320 #endif
2321 } else {
2322         /* tlbie invalidates TLBs for all segments
2323 * As we have 2^36 segments, invalidate all qemu TLBs
2324 */
2325 #if 0
2326 T0 &= TARGET_PAGE_MASK;
2327 T0 &= ~((target_ulong)-1 << 28);
2328 /* XXX: this case should be optimized,
2329 * giving a mask to tlb_flush_page
2330 */
2331 tlb_flush_page(env, T0 | (0x0 << 28));
2332 tlb_flush_page(env, T0 | (0x1 << 28));
2333 tlb_flush_page(env, T0 | (0x2 << 28));
2334 tlb_flush_page(env, T0 | (0x3 << 28));
2335 tlb_flush_page(env, T0 | (0x4 << 28));
2336 tlb_flush_page(env, T0 | (0x5 << 28));
2337 tlb_flush_page(env, T0 | (0x6 << 28));
2338 tlb_flush_page(env, T0 | (0x7 << 28));
2339 tlb_flush_page(env, T0 | (0x8 << 28));
2340 tlb_flush_page(env, T0 | (0x9 << 28));
2341 tlb_flush_page(env, T0 | (0xA << 28));
2342 tlb_flush_page(env, T0 | (0xB << 28));
2343 tlb_flush_page(env, T0 | (0xC << 28));
2344 tlb_flush_page(env, T0 | (0xD << 28));
2345 tlb_flush_page(env, T0 | (0xE << 28));
2346 tlb_flush_page(env, T0 | (0xF << 28));
2347 #else
2348 tlb_flush(env, 1);
2349 #endif
2350 }
2351 #else
2352 do_tlbia();
2353 #endif
2354 }
2355 #endif
2356
2357 #if defined(TARGET_PPC64)
2358 void do_slbia (void)
2359 {
2360 /* XXX: TODO */
2361 tlb_flush(env, 1);
2362 }
2363
2364 void do_slbie (void)
2365 {
2366 /* XXX: TODO */
2367 tlb_flush(env, 1);
2368 }
2369 #endif
2370
2371 /* Software driven TLBs management */
2372 /* PowerPC 602/603 software TLB load instructions helpers */
2373 void do_load_6xx_tlb (int is_code)
2374 {
2375 target_ulong RPN, CMP, EPN;
2376 int way;
2377
2378 RPN = env->spr[SPR_RPA];
2379 if (is_code) {
2380 CMP = env->spr[SPR_ICMP];
2381 EPN = env->spr[SPR_IMISS];
2382 } else {
2383 CMP = env->spr[SPR_DCMP];
2384 EPN = env->spr[SPR_DMISS];
2385 }
2386 way = (env->spr[SPR_SRR1] >> 17) & 1;
2387 #if defined (DEBUG_SOFTWARE_TLB)
2388 if (loglevel != 0) {
2389 fprintf(logfile, "%s: EPN %08lx %08lx PTE0 %08lx PTE1 %08lx way %d\n",
2390 __func__, (unsigned long)T0, (unsigned long)EPN,
2391 (unsigned long)CMP, (unsigned long)RPN, way);
2392 }
2393 #endif
2394 /* Store this TLB */
2395 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2396 way, is_code, CMP, RPN);
2397 }
2398
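/* The BookE TLB SIZE field encodes the page size as 1 KiB * 4^SIZE
 * (SIZE = 0 -> 1 KiB up to SIZE = 0xF -> 1 TiB).
 */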
2399 static target_ulong booke_tlb_to_page_size (int size)
2400 {
2401 return 1024 << (2 * size);
2402 }
2403
2404 static int booke_page_size_to_tlb (target_ulong page_size)
2405 {
2406 int size;
2407
2408 switch (page_size) {
2409 case 0x00000400UL:
2410 size = 0x0;
2411 break;
2412 case 0x00001000UL:
2413 size = 0x1;
2414 break;
2415 case 0x00004000UL:
2416 size = 0x2;
2417 break;
2418 case 0x00010000UL:
2419 size = 0x3;
2420 break;
2421 case 0x00040000UL:
2422 size = 0x4;
2423 break;
2424 case 0x00100000UL:
2425 size = 0x5;
2426 break;
2427 case 0x00400000UL:
2428 size = 0x6;
2429 break;
2430 case 0x01000000UL:
2431 size = 0x7;
2432 break;
2433 case 0x04000000UL:
2434 size = 0x8;
2435 break;
2436 case 0x10000000UL:
2437 size = 0x9;
2438 break;
2439 case 0x40000000UL:
2440 size = 0xA;
2441 break;
2442 #if defined (TARGET_PPC64)
2443 case 0x000100000000ULL:
2444 size = 0xB;
2445 break;
2446 case 0x000400000000ULL:
2447 size = 0xC;
2448 break;
2449 case 0x001000000000ULL:
2450 size = 0xD;
2451 break;
2452 case 0x004000000000ULL:
2453 size = 0xE;
2454 break;
2455 case 0x010000000000ULL:
2456 size = 0xF;
2457 break;
2458 #endif
2459 default:
2460 size = -1;
2461 break;
2462 }
2463
2464 return size;
2465 }
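/* Editor's sketch: since every legal size is 1024 << (2 * N), the switch
 * above could also be computed.  A minimal, untested alternative, under the
 * assumption that anything which is not an exact power of four times 1 KB
 * must still return -1; booke_page_size_to_tlb_alt is a made-up name and is
 * not used anywhere in this file:
 */
#if 0
/* hypothetical alternative -- not part of the original file */
static int booke_page_size_to_tlb_alt (target_ulong page_size)
{
    int shift;

    /* find log2(page_size), rejecting zero and non powers of two */
    for (shift = 0; shift < 8 * (int)sizeof(target_ulong); shift++) {
        if (((target_ulong)1 << shift) == page_size)
            break;
    }
    if (shift == 8 * (int)sizeof(target_ulong))
        return -1;
    /* legal encodings are 1024 << (2 * N) with 0 <= N <= 0xF */
    if (shift < 10 || (shift & 1) || ((shift - 10) >> 1) > 0xF)
        return -1;
    return (shift - 10) >> 1;
}
#endif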
2466
2467 /* Helpers for 4xx TLB management */
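/* Editor's note on the helpers below: each 4xx TLB entry is accessed as two
 * 32-bit words.  The EPN word (EPN in the upper bits, SIZE in bits 7-9,
 * V = 0x40, E = 0x20, with the matching PID carried in SPR_40x_PID) is
 * handled by do_4xx_tlbre_lo and do_4xx_tlbwe_hi; the RPN word
 * (RPN = 0xFFFFFC00, execute = 0x200, write = 0x100) by do_4xx_tlbre_hi and
 * do_4xx_tlbwe_lo, so the _lo/_hi suffixes do not line up between the read
 * and write sides.  T0 selects one of the 64 entries; tlbwe takes the new
 * word in T1, tlbre returns it in T0.
 */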
2468 void do_4xx_tlbre_lo (void)
2469 {
2470 ppcemb_tlb_t *tlb;
2471 int size;
2472
2473 T0 &= 0x3F;
2474 tlb = &env->tlb[T0].tlbe;
2475 T0 = tlb->EPN;
2476 if (tlb->prot & PAGE_VALID)
2477 T0 |= 0x40; /* V bit: must match the 0x40 check in do_4xx_tlbwe_hi */
2478 size = booke_page_size_to_tlb(tlb->size);
2479 if (size < 0 || size > 0x7)
2480 size = 1; /* size not representable in 3 bits: report 4 KB */
2481 T0 |= size << 7;
2482 env->spr[SPR_40x_PID] = tlb->PID;
2483 }
2484
2485 void do_4xx_tlbre_hi (void)
2486 {
2487 ppcemb_tlb_t *tlb;
2488
2489 T0 &= 0x3F;
2490 tlb = &env->tlb[T0].tlbe;
2491 T0 = tlb->RPN;
2492 if (tlb->prot & PAGE_EXEC)
2493 T0 |= 0x200;
2494 if (tlb->prot & PAGE_WRITE)
2495 T0 |= 0x100;
2496 }
2497
2498 void do_4xx_tlbsx (void)
2499 {
2500 T0 = ppcemb_tlb_search(env, T0, env->spr[SPR_40x_PID]);
2501 }
2502
2503 void do_4xx_tlbsx_ (void)
2504 {
2505 int tmp = xer_so; /* CR0[SO] is a copy of XER[SO] */
2506
2507 T0 = ppcemb_tlb_search(env, T0, env->spr[SPR_40x_PID]);
2508 if (T0 != -1)
2509 tmp |= 0x02; /* set CR0[EQ] when a matching TLB entry is found */
2510 env->crf[0] = tmp;
2511 }
2512
2513 void do_4xx_tlbwe_hi (void)
2514 {
2515 ppcemb_tlb_t *tlb;
2516 target_ulong page, end;
2517
2518 #if defined (DEBUG_SOFTWARE_TLB)
2519 if (loglevel != 0) {
2520 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
2521 }
2522 #endif
2523 T0 &= 0x3F;
2524 tlb = &env->tlb[T0].tlbe;
2525 /* Invalidate previous TLB (if it's valid) */
2526 if (tlb->prot & PAGE_VALID) {
2527 end = tlb->EPN + tlb->size;
2528 #if defined (DEBUG_SOFTWARE_TLB)
2529 if (loglevel != 0) {
2530 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2531 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2532 }
2533 #endif
2534 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2535 tlb_flush_page(env, page);
2536 }
2537 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2538 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2539 * If this ever occurs, one should use the ppcemb target instead
2540 * of the ppc or ppc64 one
2541 */
2542 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2543 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2544 "are not supported (%d)\n",
2545 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2546 }
2547 tlb->EPN = T1 & ~(tlb->size - 1);
2548 if (T1 & 0x40)
2549 tlb->prot |= PAGE_VALID;
2550 else
2551 tlb->prot &= ~PAGE_VALID;
2552 if (T1 & 0x20) {
2553 /* XXX: TO BE FIXED */
2554 cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
2555 }
2556 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2557 tlb->attr = T1 & 0xFF;
2558 #if defined (DEBUG_SOFTWARE_TLB)
2559 if (loglevel != 0) {
2560 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2561 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2562 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2563 tlb->prot & PAGE_READ ? 'r' : '-',
2564 tlb->prot & PAGE_WRITE ? 'w' : '-',
2565 tlb->prot & PAGE_EXEC ? 'x' : '-',
2566 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2567 }
2568 #endif
2569 /* Invalidate new TLB (if valid) */
2570 if (tlb->prot & PAGE_VALID) {
2571 end = tlb->EPN + tlb->size;
2572 #if defined (DEBUG_SOFTWARE_TLB)
2573 if (loglevel != 0) {
2574 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2575 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2576 }
2577 #endif
2578 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2579 tlb_flush_page(env, page);
2580 }
2581 }
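/* Editor's sketch: the two invalidation loops in do_4xx_tlbwe_hi above could
 * share a small helper; flush_ea_range() is a made-up name, not an existing
 * QEMU function, and env is the usual per-CPU global used throughout this
 * file.
 */
#if 0
/* hypothetical helper, not an existing QEMU API */
static inline void flush_ea_range (target_ulong start, target_ulong end)
{
    target_ulong page;

    for (page = start; page < end; page += TARGET_PAGE_SIZE)
        tlb_flush_page(env, page);
}
#endif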
2582
2583 void do_4xx_tlbwe_lo (void)
2584 {
2585 ppcemb_tlb_t *tlb;
2586
2587 #if defined (DEBUG_SOFTWARE_TLB)
2588 if (loglevel != 0) {
2589 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
2590 }
2591 #endif
2592 T0 &= 0x3F;
2593 tlb = &env->tlb[T0].tlbe;
2594 tlb->RPN = T1 & 0xFFFFFC00;
2595 tlb->prot = PAGE_READ;
2596 if (T1 & 0x200)
2597 tlb->prot |= PAGE_EXEC;
2598 if (T1 & 0x100)
2599 tlb->prot |= PAGE_WRITE;
2600 #if defined (DEBUG_SOFTWARE_TLB)
2601 if (loglevel != 0) {
2602 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2603 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2604 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2605 tlb->prot & PAGE_READ ? 'r' : '-',
2606 tlb->prot & PAGE_WRITE ? 'w' : '-',
2607 tlb->prot & PAGE_EXEC ? 'x' : '-',
2608 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2609 }
2610 #endif
2611 }
2612
2613 /* PowerPC 440 TLB management */
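/* The 440 helpers transfer one 32-bit word of a TLB entry at a time, as
 * decoded below: word 0 carries the EPN, the size field (bits 4-7), the
 * attribute bit 0x100 and the valid bit 0x200; word 1 carries the RPN word
 * (T1 & 0xFFFFFC0F); word 2 carries the storage-attribute byte (0x0000FF00)
 * and the six permission bits 0x01-0x20.  T0 selects one of the 64 entries
 * and the PID comes from the low byte of SPR_440_MMUCR.
 */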
2614 void do_440_tlbwe (int word)
2615 {
2616 ppcemb_tlb_t *tlb;
2617 target_ulong EPN, RPN, size;
2618 int do_flush_tlbs;
2619
2620 #if defined (DEBUG_SOFTWARE_TLB)
2621 if (loglevel != 0) {
2622 fprintf(logfile, "%s word %d T0 " REGX " T1 " REGX "\n",
2623 __func__, word, T0, T1);
2624 }
2625 #endif
2626 do_flush_tlbs = 0;
2627 T0 &= 0x3F;
2628 tlb = &env->tlb[T0].tlbe;
2629 switch (word) {
2630 default:
2631 /* Just here to please gcc */
2632 case 0:
2633 EPN = T1 & 0xFFFFFC00;
2634 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2635 do_flush_tlbs = 1;
2636 tlb->EPN = EPN;
2637 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
2638 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
2639 do_flush_tlbs = 1;
2640 tlb->size = size;
2641 tlb->attr &= ~0x1;
2642 tlb->attr |= (T1 >> 8) & 1;
2643 if (T1 & 0x200) {
2644 tlb->prot |= PAGE_VALID;
2645 } else {
2646 if (tlb->prot & PAGE_VALID) {
2647 tlb->prot &= ~PAGE_VALID;
2648 do_flush_tlbs = 1;
2649 }
2650 }
2651 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2652 if (do_flush_tlbs)
2653 tlb_flush(env, 1);
2654 break;
2655 case 1:
2656 RPN = T1 & 0xFFFFFC0F;
2657 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
2658 tlb_flush(env, 1);
2659 tlb->RPN = RPN;
2660 break;
2661 case 2:
2662 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
2663 tlb->prot = tlb->prot & PAGE_VALID;
2664 if (T1 & 0x1)
2665 tlb->prot |= PAGE_READ << 4;
2666 if (T1 & 0x2)
2667 tlb->prot |= PAGE_WRITE << 4;
2668 if (T1 & 0x4)
2669 tlb->prot |= PAGE_EXEC << 4;
2670 if (T1 & 0x8)
2671 tlb->prot |= PAGE_READ;
2672 if (T1 & 0x10)
2673 tlb->prot |= PAGE_WRITE;
2674 if (T1 & 0x20)
2675 tlb->prot |= PAGE_EXEC;
2676 break;
2677 }
2678 }
2679
2680 void do_440_tlbsx (void)
2681 {
2682 T0 = ppcemb_tlb_search(env, T0, env->spr[SPR_440_MMUCR] & 0xFF);
2683 }
2684
2685 void do_440_tlbsx_ (void)
2686 {
2687 int tmp = xer_so;
2688
2689 T0 = ppcemb_tlb_search(env, T0, env->spr[SPR_440_MMUCR] & 0xFF);
2690 if (T0 != -1)
2691 tmp |= 0x02;
2692 env->crf[0] = tmp;
2693 }
2694
2695 void do_440_tlbre (int word)
2696 {
2697 ppcemb_tlb_t *tlb;
2698 int size;
2699
2700 T0 &= 0x3F;
2701 tlb = &env->tlb[T0].tlbe;
2702 switch (word) {
2703 default:
2704 /* Just here to please gcc */
2705 case 0:
2706 T0 = tlb->EPN;
2707 size = booke_page_size_to_tlb(tlb->size);
2708 if (size < 0 || size > 0xF)
2709 size = 1;
2710 T0 |= size << 4;
2711 if (tlb->attr & 0x1)
2712 T0 |= 0x100;
2713 if (tlb->prot & PAGE_VALID)
2714 T0 |= 0x200;
2715 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2716 env->spr[SPR_440_MMUCR] |= tlb->PID;
2717 break;
2718 case 1:
2719 T0 = tlb->RPN;
2720 break;
2721 case 2:
2722 T0 = tlb->attr & ~0x1;
2723 if (tlb->prot & (PAGE_READ << 4))
2724 T0 |= 0x1;
2725 if (tlb->prot & (PAGE_WRITE << 4))
2726 T0 |= 0x2;
2727 if (tlb->prot & (PAGE_EXEC << 4))
2728 T0 |= 0x4;
2729 if (tlb->prot & PAGE_READ)
2730 T0 |= 0x8;
2731 if (tlb->prot & PAGE_WRITE)
2732 T0 |= 0x10;
2733 if (tlb->prot & PAGE_EXEC)
2734 T0 |= 0x20;
2735 break;
2736 }
2737 }
2738 #endif /* !CONFIG_USER_ONLY */