1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21
22 #include "op_helper.h"
23
24 #define MEMSUFFIX _raw
25 #include "op_helper.h"
26 #include "op_helper_mem.h"
27 #if !defined(CONFIG_USER_ONLY)
28 #define MEMSUFFIX _user
29 #include "op_helper.h"
30 #include "op_helper_mem.h"
31 #define MEMSUFFIX _kernel
32 #include "op_helper.h"
33 #include "op_helper_mem.h"
34 #endif
35
36 //#define DEBUG_OP
37 //#define DEBUG_EXCEPTIONS
38 //#define DEBUG_SOFTWARE_TLB
39 //#define FLUSH_ALL_TLBS
40
41 /*****************************************************************************/
42 /* Exceptions processing helpers */
43 void cpu_loop_exit (void)
44 {
45 longjmp(env->jmp_env, 1);
46 }
47
48 void do_raise_exception_err (uint32_t exception, int error_code)
49 {
50 #if 0
51 printf("Raise exception %3x code : %d\n", exception, error_code);
52 #endif
53 switch (exception) {
54 case EXCP_PROGRAM:
55 if (error_code == EXCP_FP && msr_fe0 == 0 && msr_fe1 == 0)
56 return;
57 break;
58 default:
59 break;
60 }
61 env->exception_index = exception;
62 env->error_code = error_code;
63 cpu_loop_exit();
64 }
65
66 void do_raise_exception (uint32_t exception)
67 {
68 do_raise_exception_err(exception, 0);
69 }
70
71 void cpu_dump_EA (target_ulong EA);
72 void do_print_mem_EA (target_ulong EA)
73 {
74 cpu_dump_EA(EA);
75 }
76
77 /*****************************************************************************/
78 /* Registers load and stores */
79 void do_load_cr (void)
80 {
81 T0 = (env->crf[0] << 28) |
82 (env->crf[1] << 24) |
83 (env->crf[2] << 20) |
84 (env->crf[3] << 16) |
85 (env->crf[4] << 12) |
86 (env->crf[5] << 8) |
87 (env->crf[6] << 4) |
88 (env->crf[7] << 0);
89 }
90
91 void do_store_cr (uint32_t mask)
92 {
93 int i, sh;
94
95 for (i = 0, sh = 7; i < 8; i++, sh --) {
96 if (mask & (1 << sh))
97 env->crf[i] = (T0 >> (sh * 4)) & 0xFUL;
98 }
99 }
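/* Illustrative note (not part of the original file): the mask argument
 * mirrors the FXM field of mtcrf -- mask bit 7 selects CR field 0 (taken
 * from bits 28..31 of T0) and mask bit 0 selects CR field 7 (bits 0..3),
 * so do_store_cr(0x80) rewrites env->crf[0] only.  A hypothetical helper
 * for a single field:
 */
static inline void store_one_crf (int field, uint32_t packed_cr)
{
    /* field 0 is held in the most significant nibble of the packed word */
    env->crf[field] = (packed_cr >> ((7 - field) * 4)) & 0xF;
}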
100
101 void do_load_xer (void)
102 {
103 T0 = (xer_so << XER_SO) |
104 (xer_ov << XER_OV) |
105 (xer_ca << XER_CA) |
106 (xer_bc << XER_BC) |
107 (xer_cmp << XER_CMP);
108 }
109
110 void do_store_xer (void)
111 {
112 xer_so = (T0 >> XER_SO) & 0x01;
113 xer_ov = (T0 >> XER_OV) & 0x01;
114 xer_ca = (T0 >> XER_CA) & 0x01;
115 xer_cmp = (T0 >> XER_CMP) & 0xFF;
116 xer_bc = (T0 >> XER_BC) & 0x7F;
117 }
118
119 void do_load_fpscr (void)
120 {
121 /* The 32 MSB of the target fpr are undefined.
122 * They'll be zero...
123 */
124 union {
125 float64 d;
126 struct {
127 uint32_t u[2];
128 } s;
129 } u;
130 int i;
131
132 #if defined(WORDS_BIGENDIAN)
133 #define WORD0 0
134 #define WORD1 1
135 #else
136 #define WORD0 1
137 #define WORD1 0
138 #endif
139 u.s.u[WORD0] = 0;
140 u.s.u[WORD1] = 0;
141 for (i = 0; i < 8; i++)
142 u.s.u[WORD1] |= env->fpscr[i] << (4 * i);
143 FT0 = u.d;
144 }
145
146 void do_store_fpscr (uint32_t mask)
147 {
148 /*
149 * We use only the 32 LSB of the incoming fpr
150 */
151 union {
152 double d;
153 struct {
154 uint32_t u[2];
155 } s;
156 } u;
157 int i, rnd_type;
158
159 u.d = FT0;
160 if (mask & 0x80)
161 env->fpscr[0] = (env->fpscr[0] & 0x9) | ((u.s.u[WORD1] >> 28) & ~0x9);
162 for (i = 1; i < 7; i++) {
163 if (mask & (1 << (7 - i)))
164 env->fpscr[i] = (u.s.u[WORD1] >> (4 * (7 - i))) & 0xF;
165 }
166 /* TODO: update FEX & VX */
167 /* Set rounding mode */
168 switch (env->fpscr[0] & 0x3) {
169 case 0:
170 /* Best approximation (round to nearest) */
171 rnd_type = float_round_nearest_even;
172 break;
173 case 1:
174 /* Smaller magnitude (round toward zero) */
175 rnd_type = float_round_to_zero;
176 break;
177 case 2:
178         /* Round toward +infinity */
179 rnd_type = float_round_up;
180 break;
181 default:
182 case 3:
183         /* Round toward -infinity */
184 rnd_type = float_round_down;
185 break;
186 }
187 set_float_rounding_mode(rnd_type, &env->fp_status);
188 }
189
190 target_ulong ppc_load_dump_spr (int sprn)
191 {
192 if (loglevel != 0) {
193 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
194 sprn, sprn, env->spr[sprn]);
195 }
196
197 return env->spr[sprn];
198 }
199
200 void ppc_store_dump_spr (int sprn, target_ulong val)
201 {
202 if (loglevel != 0) {
203 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
204 sprn, sprn, env->spr[sprn], val);
205 }
206 env->spr[sprn] = val;
207 }
208
209 /*****************************************************************************/
210 /* Fixed point operations helpers */
211 #if defined(TARGET_PPC64)
212 static void add128 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
213 {
214 *plow += a;
215 /* carry test */
216 if (*plow < a)
217 (*phigh)++;
218 *phigh += b;
219 }
220
221 static void neg128 (uint64_t *plow, uint64_t *phigh)
222 {
223 *plow = ~ *plow;
224 *phigh = ~ *phigh;
225 add128(plow, phigh, 1, 0);
226 }
227
228 static void mul64 (uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
229 {
230 uint32_t a0, a1, b0, b1;
231 uint64_t v;
232
233 a0 = a;
234 a1 = a >> 32;
235
236 b0 = b;
237 b1 = b >> 32;
238
239 v = (uint64_t)a0 * (uint64_t)b0;
240 *plow = v;
241 *phigh = 0;
242
243 v = (uint64_t)a0 * (uint64_t)b1;
244 add128(plow, phigh, v << 32, v >> 32);
245
246 v = (uint64_t)a1 * (uint64_t)b0;
247 add128(plow, phigh, v << 32, v >> 32);
248
249 v = (uint64_t)a1 * (uint64_t)b1;
250 *phigh += v;
251 #if defined(DEBUG_MULDIV)
252 printf("mul: 0x%016llx * 0x%016llx = 0x%016llx%016llx\n",
253 a, b, *phigh, *plow);
254 #endif
255 }
256
257 void do_mul64 (uint64_t *plow, uint64_t *phigh)
258 {
259 mul64(plow, phigh, T0, T1);
260 }
261
262 static void imul64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
263 {
264 int sa, sb;
265 sa = (a < 0);
266 if (sa)
267 a = -a;
268 sb = (b < 0);
269 if (sb)
270 b = -b;
271 mul64(plow, phigh, a, b);
272 if (sa ^ sb) {
273 neg128(plow, phigh);
274 }
275 }
276
277 void do_imul64 (uint64_t *plow, uint64_t *phigh)
278 {
279 imul64(plow, phigh, T0, T1);
280 }
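/* Illustrative cross-check (not part of the original helpers): mul64()
 * implements the identity
 *     a * b = (a1*b1 << 64) + ((a0*b1 + a1*b0) << 32) + a0*b0
 * for a = (a1 << 32) | a0 and b = (b1 << 32) | b0.  The low 64 bits of
 * that sum always match the native truncated 64-bit product, which gives
 * a cheap sanity check; mul64_selftest is a hypothetical name.
 */
static inline int mul64_selftest (uint64_t a, uint64_t b)
{
    uint64_t lo, hi;

    mul64(&lo, &hi, a, b);
    /* the low half must agree with plain (mod 2^64) multiplication */
    return lo == a * b;
}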
281 #endif
282
283 void do_adde (void)
284 {
285 T2 = T0;
286 T0 += T1 + xer_ca;
287 if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
288 (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
289 xer_ca = 0;
290 } else {
291 xer_ca = 1;
292 }
293 }
294
295 #if defined(TARGET_PPC64)
296 void do_adde_64 (void)
297 {
298 T2 = T0;
299 T0 += T1 + xer_ca;
300 if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
301 (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
302 xer_ca = 0;
303 } else {
304 xer_ca = 1;
305 }
306 }
307 #endif
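/* Illustrative note (not from the original file): do_adde() detects the
 * carry out of "x + y + ca" without a wider type: the 32-bit sum wraps
 * iff it is strictly below the original x, or equal to x while ca was 1
 * (the latter catches y + ca == 2^32).  A hypothetical standalone check:
 */
static inline int adde_carries (uint32_t x, uint32_t y, uint32_t ca)
{
    uint32_t sum = x + y + ca;

    /* e.g. adde_carries(0xFFFFFFFF, 0, 1) == 1, adde_carries(1, 2, 0) == 0 */
    return sum < x || (ca == 1 && sum == x);
}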
308
309 void do_addmeo (void)
310 {
311 T1 = T0;
312 T0 += xer_ca + (-1);
313 if (likely(!((uint32_t)T1 &
314 ((uint32_t)T1 ^ (uint32_t)T0) & (1UL << 31)))) {
315 xer_ov = 0;
316 } else {
317 xer_so = 1;
318 xer_ov = 1;
319 }
320 if (likely(T1 != 0))
321 xer_ca = 1;
322 }
323
324 #if defined(TARGET_PPC64)
325 void do_addmeo_64 (void)
326 {
327 T1 = T0;
328 T0 += xer_ca + (-1);
329 if (likely(!((uint64_t)T1 &
330 ((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) {
331 xer_ov = 0;
332 } else {
333 xer_so = 1;
334 xer_ov = 1;
335 }
336 if (likely(T1 != 0))
337 xer_ca = 1;
338 }
339 #endif
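/* Illustrative note (not from the original file): the xer_ov test above is
 * the classic "operand negative and sign bit flipped" check.  Since addme
 * adds -1 + ca, a signed overflow is only possible when ca == 0 and the
 * operand is INT32_MIN (0x80000000 turning into 0x7FFFFFFF).  Hypothetical
 * standalone form:
 */
static inline int addme_overflows (uint32_t x, uint32_t ca)
{
    uint32_t r = x + ca + 0xFFFFFFFF;   /* x + (-1) + ca, mod 2^32 */

    return (x & (x ^ r) & (1UL << 31)) != 0;
}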
340
341 void do_divwo (void)
342 {
343 if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
344 (int32_t)T1 == 0))) {
345 xer_ov = 0;
346 T0 = (int32_t)T0 / (int32_t)T1;
347 } else {
348 xer_so = 1;
349 xer_ov = 1;
350 T0 = (-1) * ((uint32_t)T0 >> 31);
351 }
352 }
353
354 #if defined(TARGET_PPC64)
355 void do_divdo (void)
356 {
357 if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1ULL) ||
358 (int64_t)T1 == 0))) {
359 xer_ov = 0;
360 T0 = (int64_t)T0 / (int64_t)T1;
361 } else {
362 xer_so = 1;
363 xer_ov = 1;
364 T0 = (-1ULL) * ((uint64_t)T0 >> 63);
365 }
366 }
367 #endif
368
369 void do_divwuo (void)
370 {
371 if (likely((uint32_t)T1 != 0)) {
372 xer_ov = 0;
373 T0 = (uint32_t)T0 / (uint32_t)T1;
374 } else {
375 xer_so = 1;
376 xer_ov = 1;
377 T0 = 0;
378 }
379 }
380
381 #if defined(TARGET_PPC64)
382 void do_divduo (void)
383 {
384 if (likely((uint64_t)T1 != 0)) {
385 xer_ov = 0;
386 T0 = (uint64_t)T0 / (uint64_t)T1;
387 } else {
388 xer_so = 1;
389 xer_ov = 1;
390 T0 = 0;
391 }
392 }
393 #endif
394
395 void do_mullwo (void)
396 {
397 int64_t res = (int64_t)T0 * (int64_t)T1;
398
399 if (likely((int32_t)res == res)) {
400 xer_ov = 0;
401 } else {
402 xer_ov = 1;
403 xer_so = 1;
404 }
405 T0 = (int32_t)res;
406 }
407
408 #if defined(TARGET_PPC64)
409 void do_mulldo (void)
410 {
411 int64_t th;
412 uint64_t tl;
413
414 do_imul64(&tl, &th);
415 if (likely(th == 0)) {
416 xer_ov = 0;
417 } else {
418 xer_ov = 1;
419 xer_so = 1;
420 }
421 T0 = (int64_t)tl;
422 }
423 #endif
424
425 void do_nego (void)
426 {
427 if (likely((int32_t)T0 != INT32_MIN)) {
428 xer_ov = 0;
429 T0 = -(int32_t)T0;
430 } else {
431 xer_ov = 1;
432 xer_so = 1;
433 }
434 }
435
436 #if defined(TARGET_PPC64)
437 void do_nego_64 (void)
438 {
439 if (likely((int64_t)T0 != INT64_MIN)) {
440 xer_ov = 0;
441 T0 = -(int64_t)T0;
442 } else {
443 xer_ov = 1;
444 xer_so = 1;
445 }
446 }
447 #endif
448
449 void do_subfe (void)
450 {
451 T0 = T1 + ~T0 + xer_ca;
452 if (likely((uint32_t)T0 >= (uint32_t)T1 &&
453 (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
454 xer_ca = 0;
455 } else {
456 xer_ca = 1;
457 }
458 }
459
460 #if defined(TARGET_PPC64)
461 void do_subfe_64 (void)
462 {
463 T0 = T1 + ~T0 + xer_ca;
464 if (likely((uint64_t)T0 >= (uint64_t)T1 &&
465 (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
466 xer_ca = 0;
467 } else {
468 xer_ca = 1;
469 }
470 }
471 #endif
472
473 void do_subfmeo (void)
474 {
475 T1 = T0;
476 T0 = ~T0 + xer_ca - 1;
477 if (likely(!((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0) &
478 (1UL << 31)))) {
479 xer_ov = 0;
480 } else {
481 xer_so = 1;
482 xer_ov = 1;
483 }
484 if (likely((uint32_t)T1 != UINT32_MAX))
485 xer_ca = 1;
486 }
487
488 #if defined(TARGET_PPC64)
489 void do_subfmeo_64 (void)
490 {
491 T1 = T0;
492 T0 = ~T0 + xer_ca - 1;
493 if (likely(!((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0) &
494 (1ULL << 63)))) {
495 xer_ov = 0;
496 } else {
497 xer_so = 1;
498 xer_ov = 1;
499 }
500 if (likely((uint64_t)T1 != UINT64_MAX))
501 xer_ca = 1;
502 }
503 #endif
504
505 void do_subfzeo (void)
506 {
507 T1 = T0;
508 T0 = ~T0 + xer_ca;
509 if (likely(!(((uint32_t)~T1 ^ UINT32_MAX) &
510 ((uint32_t)(~T1) ^ (uint32_t)T0) & (1UL << 31)))) {
511 xer_ov = 0;
512 } else {
513 xer_ov = 1;
514 xer_so = 1;
515 }
516 if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
517 xer_ca = 0;
518 } else {
519 xer_ca = 1;
520 }
521 }
522
523 #if defined(TARGET_PPC64)
524 void do_subfzeo_64 (void)
525 {
526 T1 = T0;
527 T0 = ~T0 + xer_ca;
528 if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
529 ((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
530 xer_ov = 0;
531 } else {
532 xer_ov = 1;
533 xer_so = 1;
534 }
535 if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
536 xer_ca = 0;
537 } else {
538 xer_ca = 1;
539 }
540 }
541 #endif
542
543 /* shift right arithmetic helper */
544 void do_sraw (void)
545 {
546 int32_t ret;
547
548 if (likely(!(T1 & 0x20UL))) {
549 if (likely((uint32_t)T1 != 0)) {
550 ret = (int32_t)T0 >> (T1 & 0x1fUL);
551 if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
552 xer_ca = 0;
553 } else {
554 xer_ca = 1;
555 }
556 } else {
557 ret = T0;
558 xer_ca = 0;
559 }
560 } else {
561 ret = (-1) * ((uint32_t)T0 >> 31);
562 if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) {
563 xer_ca = 0;
564 } else {
565 xer_ca = 1;
566 }
567 }
568 T0 = ret;
569 }
570
571 #if defined(TARGET_PPC64)
572 void do_srad (void)
573 {
574 int64_t ret;
575
576 if (likely(!(T1 & 0x40UL))) {
577 if (likely((uint64_t)T1 != 0)) {
578 ret = (int64_t)T0 >> (T1 & 0x3FUL);
579 if (likely(ret >= 0 || ((int64_t)T0 & ((1 << T1) - 1)) == 0)) {
580 xer_ca = 0;
581 } else {
582 xer_ca = 1;
583 }
584 } else {
585 ret = T0;
586 xer_ca = 0;
587 }
588 } else {
589 ret = (-1) * ((uint64_t)T0 >> 63);
590 if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
591 xer_ca = 0;
592 } else {
593 xer_ca = 1;
594 }
595 }
596 T0 = ret;
597 }
598 #endif
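/* Illustrative note (not from the original file): for sraw/srad the carry
 * is defined as "source negative and at least one 1 bit shifted out",
 * which is exactly what the low-bit masks above test.  Hypothetical
 * 32-bit sketch:
 */
static inline int sraw_carry (int32_t src, int sh)   /* 0 <= sh <= 31 */
{
    /* e.g. sraw_carry(-2, 1) == 0 (only a 0 bit is lost),
     *      sraw_carry(-1, 1) == 1 */
    return src < 0 && (src & ((1UL << sh) - 1)) != 0;
}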
599
600 static inline int popcnt (uint32_t val)
601 {
602 int i;
603
604     for (i = 0; val != 0; i++)
605         val = val & (val - 1);  /* clear the lowest set bit */
606
607 return i;
608 }
609
610 void do_popcntb (void)
611 {
612 uint32_t ret;
613 int i;
614
615 ret = 0;
616 for (i = 0; i < 32; i += 8)
617 ret |= popcnt((T0 >> i) & 0xFF) << i;
618 T0 = ret;
619 }
620
621 #if defined(TARGET_PPC64)
622 void do_popcntb_64 (void)
623 {
624 uint64_t ret;
625 int i;
626
627 ret = 0;
628 for (i = 0; i < 64; i += 8)
629         ret |= (uint64_t)popcnt((T0 >> i) & 0xFF) << i;
630 T0 = ret;
631 }
632 #endif
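/* Illustrative example (not from the original file): do_popcntb() stores
 * the population count of each source byte in the corresponding result
 * byte, e.g. T0 = 0x010300FF yields 0x01020008 (1, 2, 0 and 8 set bits).
 */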
633
634 /*****************************************************************************/
635 /* Floating point operations helpers */
636 void do_fctiw (void)
637 {
638 union {
639 double d;
640 uint64_t i;
641 } p;
642
643 p.i = float64_to_int32(FT0, &env->fp_status);
644 #if USE_PRECISE_EMULATION
645 /* XXX: higher bits are not supposed to be significant.
646 * to make tests easier, return the same as a real PowerPC 750 (aka G3)
647 */
648 p.i |= 0xFFF80000ULL << 32;
649 #endif
650 FT0 = p.d;
651 }
652
653 void do_fctiwz (void)
654 {
655 union {
656 double d;
657 uint64_t i;
658 } p;
659
660 p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status);
661 #if USE_PRECISE_EMULATION
662 /* XXX: higher bits are not supposed to be significant.
663 * to make tests easier, return the same as a real PowerPC 750 (aka G3)
664 */
665 p.i |= 0xFFF80000ULL << 32;
666 #endif
667 FT0 = p.d;
668 }
669
670 #if defined(TARGET_PPC64)
671 void do_fcfid (void)
672 {
673 union {
674 double d;
675 uint64_t i;
676 } p;
677
678 p.d = FT0;
679 FT0 = int64_to_float64(p.i, &env->fp_status);
680 }
681
682 void do_fctid (void)
683 {
684 union {
685 double d;
686 uint64_t i;
687 } p;
688
689 p.i = float64_to_int64(FT0, &env->fp_status);
690 FT0 = p.d;
691 }
692
693 void do_fctidz (void)
694 {
695 union {
696 double d;
697 uint64_t i;
698 } p;
699
700 p.i = float64_to_int64_round_to_zero(FT0, &env->fp_status);
701 FT0 = p.d;
702 }
703
704 #endif
705
706 #if USE_PRECISE_EMULATION
707 void do_fmadd (void)
708 {
709 #ifdef FLOAT128
710 float128 ft0_128, ft1_128;
711
712 ft0_128 = float64_to_float128(FT0, &env->fp_status);
713 ft1_128 = float64_to_float128(FT1, &env->fp_status);
714 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
715 ft1_128 = float64_to_float128(FT2, &env->fp_status);
716 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
717 FT0 = float128_to_float64(ft0_128, &env->fp_status);
718 #else
719 /* This is OK on x86 hosts */
720 FT0 = (FT0 * FT1) + FT2;
721 #endif
722 }
723
724 void do_fmsub (void)
725 {
726 #ifdef FLOAT128
727 float128 ft0_128, ft1_128;
728
729 ft0_128 = float64_to_float128(FT0, &env->fp_status);
730 ft1_128 = float64_to_float128(FT1, &env->fp_status);
731 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
732 ft1_128 = float64_to_float128(FT2, &env->fp_status);
733 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
734 FT0 = float128_to_float64(ft0_128, &env->fp_status);
735 #else
736 /* This is OK on x86 hosts */
737 FT0 = (FT0 * FT1) - FT2;
738 #endif
739 }
740 #endif /* USE_PRECISE_EMULATION */
741
742 void do_fnmadd (void)
743 {
744 #if USE_PRECISE_EMULATION
745 #ifdef FLOAT128
746 float128 ft0_128, ft1_128;
747
748 ft0_128 = float64_to_float128(FT0, &env->fp_status);
749 ft1_128 = float64_to_float128(FT1, &env->fp_status);
750 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
751 ft1_128 = float64_to_float128(FT2, &env->fp_status);
752 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
753 FT0 = float128_to_float64(ft0_128, &env->fp_status);
754 #else
755 /* This is OK on x86 hosts */
756 FT0 = (FT0 * FT1) + FT2;
757 #endif
758 #else
759 FT0 = float64_mul(FT0, FT1, &env->fp_status);
760 FT0 = float64_add(FT0, FT2, &env->fp_status);
761 #endif
762 if (likely(!isnan(FT0)))
763 FT0 = float64_chs(FT0);
764 }
765
766 void do_fnmsub (void)
767 {
768 #if USE_PRECISE_EMULATION
769 #ifdef FLOAT128
770 float128 ft0_128, ft1_128;
771
772 ft0_128 = float64_to_float128(FT0, &env->fp_status);
773 ft1_128 = float64_to_float128(FT1, &env->fp_status);
774 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
775 ft1_128 = float64_to_float128(FT2, &env->fp_status);
776 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
777 FT0 = float128_to_float64(ft0_128, &env->fp_status);
778 #else
779 /* This is OK on x86 hosts */
780 FT0 = (FT0 * FT1) - FT2;
781 #endif
782 #else
783 FT0 = float64_mul(FT0, FT1, &env->fp_status);
784 FT0 = float64_sub(FT0, FT2, &env->fp_status);
785 #endif
786 if (likely(!isnan(FT0)))
787 FT0 = float64_chs(FT0);
788 }
789
790 void do_fsqrt (void)
791 {
792 FT0 = float64_sqrt(FT0, &env->fp_status);
793 }
794
795 void do_fres (void)
796 {
797 union {
798 double d;
799 uint64_t i;
800 } p;
801
802 if (likely(isnormal(FT0))) {
803 #if USE_PRECISE_EMULATION
804 FT0 = float64_div(1.0, FT0, &env->fp_status);
805 FT0 = float64_to_float32(FT0, &env->fp_status);
806 #else
807 FT0 = float32_div(1.0, FT0, &env->fp_status);
808 #endif
809 } else {
810 p.d = FT0;
811 if (p.i == 0x8000000000000000ULL) {
812 p.i = 0xFFF0000000000000ULL;
813 } else if (p.i == 0x0000000000000000ULL) {
814 p.i = 0x7FF0000000000000ULL;
815 } else if (isnan(FT0)) {
816 p.i = 0x7FF8000000000000ULL;
817 } else if (FT0 < 0.0) {
818 p.i = 0x8000000000000000ULL;
819 } else {
820 p.i = 0x0000000000000000ULL;
821 }
822 FT0 = p.d;
823 }
824 }
825
826 void do_frsqrte (void)
827 {
828 union {
829 double d;
830 uint64_t i;
831 } p;
832
833 if (likely(isnormal(FT0) && FT0 > 0.0)) {
834 FT0 = float64_sqrt(FT0, &env->fp_status);
835 FT0 = float32_div(1.0, FT0, &env->fp_status);
836 } else {
837 p.d = FT0;
838 if (p.i == 0x8000000000000000ULL) {
839 p.i = 0xFFF0000000000000ULL;
840 } else if (p.i == 0x0000000000000000ULL) {
841 p.i = 0x7FF0000000000000ULL;
842 } else if (isnan(FT0)) {
843 if (!(p.i & 0x0008000000000000ULL))
844 p.i |= 0x000FFFFFFFFFFFFFULL;
845 } else if (FT0 < 0) {
846 p.i = 0x7FF8000000000000ULL;
847 } else {
848 p.i = 0x0000000000000000ULL;
849 }
850 FT0 = p.d;
851 }
852 }
853
854 void do_fsel (void)
855 {
856 if (FT0 >= 0)
857 FT0 = FT1;
858 else
859 FT0 = FT2;
860 }
861
862 void do_fcmpu (void)
863 {
864 if (likely(!isnan(FT0) && !isnan(FT1))) {
865 if (float64_lt(FT0, FT1, &env->fp_status)) {
866 T0 = 0x08UL;
867 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
868 T0 = 0x04UL;
869 } else {
870 T0 = 0x02UL;
871 }
872 } else {
873 T0 = 0x01UL;
874 env->fpscr[4] |= 0x1;
875 env->fpscr[6] |= 0x1;
876 }
877 env->fpscr[3] = T0;
878 }
879
880 void do_fcmpo (void)
881 {
882 env->fpscr[4] &= ~0x1;
883 if (likely(!isnan(FT0) && !isnan(FT1))) {
884 if (float64_lt(FT0, FT1, &env->fp_status)) {
885 T0 = 0x08UL;
886 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
887 T0 = 0x04UL;
888 } else {
889 T0 = 0x02UL;
890 }
891 } else {
892 T0 = 0x01UL;
893 env->fpscr[4] |= 0x1;
894         if (!float64_is_signaling_nan(FT0) && !float64_is_signaling_nan(FT1)) {
895 /* Quiet NaN case */
896 env->fpscr[6] |= 0x1;
897 if (!(env->fpscr[1] & 0x8))
898 env->fpscr[4] |= 0x8;
899 } else {
900 env->fpscr[4] |= 0x8;
901 }
902 }
903 env->fpscr[3] = T0;
904 }
905
906 #if !defined (CONFIG_USER_ONLY)
907 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
908 void do_rfi (void)
909 {
910 #if defined(TARGET_PPC64)
911 if (env->spr[SPR_SRR1] & (1ULL << MSR_SF)) {
912 env->nip = (uint64_t)(env->spr[SPR_SRR0] & ~0x00000003);
913 do_store_msr(env, (uint64_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
914 } else {
915 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
916 ppc_store_msr_32(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
917 }
918 #else
919 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
920 do_store_msr(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
921 #endif
922 #if defined (DEBUG_OP)
923 cpu_dump_rfi(env->nip, do_load_msr(env));
924 #endif
925 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
926 }
927
928 #if defined(TARGET_PPC64)
929 void do_rfid (void)
930 {
931 if (env->spr[SPR_SRR1] & (1ULL << MSR_SF)) {
932 env->nip = (uint64_t)(env->spr[SPR_SRR0] & ~0x00000003);
933 do_store_msr(env, (uint64_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
934 } else {
935 env->nip = (uint32_t)(env->spr[SPR_SRR0] & ~0x00000003);
936 do_store_msr(env, (uint32_t)(env->spr[SPR_SRR1] & ~0xFFFF0000UL));
937 }
938 #if defined (DEBUG_OP)
939 cpu_dump_rfi(env->nip, do_load_msr(env));
940 #endif
941 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
942 }
943 #endif
944 #endif
945
946 void do_tw (int flags)
947 {
948 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
949 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
950 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
951 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
952 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
953 do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
954 }
955 }
956
957 #if defined(TARGET_PPC64)
958 void do_td (int flags)
959 {
960 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
961 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
962 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
963 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
964 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
965 do_raise_exception_err(EXCP_PROGRAM, EXCP_TRAP);
966 }
967 #endif
968
969 /*****************************************************************************/
970 /* PowerPC 601 specific instructions (POWER bridge) */
971 void do_POWER_abso (void)
972 {
973 if ((uint32_t)T0 == INT32_MIN) {
974 T0 = INT32_MAX;
975 xer_ov = 1;
976 xer_so = 1;
977 } else {
978 T0 = -T0;
979 xer_ov = 0;
980 }
981 }
982
983 void do_POWER_clcs (void)
984 {
985 switch (T0) {
986 case 0x0CUL:
987 /* Instruction cache line size */
988 T0 = ICACHE_LINE_SIZE;
989 break;
990 case 0x0DUL:
991 /* Data cache line size */
992 T0 = DCACHE_LINE_SIZE;
993 break;
994 case 0x0EUL:
995 /* Minimum cache line size */
996 T0 = ICACHE_LINE_SIZE < DCACHE_LINE_SIZE ?
997 ICACHE_LINE_SIZE : DCACHE_LINE_SIZE;
998 break;
999 case 0x0FUL:
1000 /* Maximum cache line size */
1001 T0 = ICACHE_LINE_SIZE > DCACHE_LINE_SIZE ?
1002 ICACHE_LINE_SIZE : DCACHE_LINE_SIZE;
1003 break;
1004 default:
1005 /* Undefined */
1006 break;
1007 }
1008 }
1009
1010 void do_POWER_div (void)
1011 {
1012 uint64_t tmp;
1013
1014 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1015 T0 = (long)((-1) * (T0 >> 31));
1016 env->spr[SPR_MQ] = 0;
1017 } else {
1018 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1019 env->spr[SPR_MQ] = tmp % T1;
1020 T0 = tmp / (int32_t)T1;
1021 }
1022 }
1023
1024 void do_POWER_divo (void)
1025 {
1026 int64_t tmp;
1027
1028 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1029 T0 = (long)((-1) * (T0 >> 31));
1030 env->spr[SPR_MQ] = 0;
1031 xer_ov = 1;
1032 xer_so = 1;
1033 } else {
1034 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1035 env->spr[SPR_MQ] = tmp % T1;
1036 tmp /= (int32_t)T1;
1037 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1038 xer_ov = 1;
1039 xer_so = 1;
1040 } else {
1041 xer_ov = 0;
1042 }
1043 T0 = tmp;
1044 }
1045 }
1046
1047 void do_POWER_divs (void)
1048 {
1049 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1050 T0 = (long)((-1) * (T0 >> 31));
1051 env->spr[SPR_MQ] = 0;
1052 } else {
1053 env->spr[SPR_MQ] = T0 % T1;
1054 T0 = (int32_t)T0 / (int32_t)T1;
1055 }
1056 }
1057
1058 void do_POWER_divso (void)
1059 {
1060 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1061 T0 = (long)((-1) * (T0 >> 31));
1062 env->spr[SPR_MQ] = 0;
1063 xer_ov = 1;
1064 xer_so = 1;
1065 } else {
1066         env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1067         T0 = (int32_t)T0 / (int32_t)T1;
1068 xer_ov = 0;
1069 }
1070 }
1071
1072 void do_POWER_dozo (void)
1073 {
1074 if ((int32_t)T1 > (int32_t)T0) {
1075 T2 = T0;
1076 T0 = T1 - T0;
1077 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1078 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1079 xer_so = 1;
1080 xer_ov = 1;
1081 } else {
1082 xer_ov = 0;
1083 }
1084 } else {
1085 T0 = 0;
1086 xer_ov = 0;
1087 }
1088 }
1089
1090 void do_POWER_maskg (void)
1091 {
1092 uint32_t ret;
1093
1094 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1095 ret = -1;
1096 } else {
1097 ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
1098 (((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
1099 if ((uint32_t)T0 > (uint32_t)T1)
1100 ret = ~ret;
1101 }
1102 T0 = ret;
1103 }
1104
1105 void do_POWER_mulo (void)
1106 {
1107 uint64_t tmp;
1108
1109 tmp = (uint64_t)T0 * (uint64_t)T1;
1110 env->spr[SPR_MQ] = tmp >> 32;
1111 T0 = tmp;
1112 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1113 xer_ov = 1;
1114 xer_so = 1;
1115 } else {
1116 xer_ov = 0;
1117 }
1118 }
1119
1120 #if !defined (CONFIG_USER_ONLY)
1121 void do_POWER_rac (void)
1122 {
1123 #if 0
1124 mmu_ctx_t ctx;
1125
1126 /* We don't have to generate many instances of this instruction,
1127 * as rac is supervisor only.
1128 */
1129 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT, 1) == 0)
1130 T0 = ctx.raddr;
1131 #endif
1132 }
1133
1134 void do_POWER_rfsvc (void)
1135 {
1136 env->nip = env->lr & ~0x00000003UL;
1137 T0 = env->ctr & 0x0000FFFFUL;
1138 do_store_msr(env, T0);
1139 #if defined (DEBUG_OP)
1140 cpu_dump_rfi(env->nip, do_load_msr(env));
1141 #endif
1142 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1143 }
1144
1145 /* PowerPC 601 BAT management helper */
1146 void do_store_601_batu (int nr)
1147 {
1148 do_store_ibatu(env, nr, (uint32_t)T0);
1149 env->DBAT[0][nr] = env->IBAT[0][nr];
1150 env->DBAT[1][nr] = env->IBAT[1][nr];
1151 }
1152 #endif
1153
1154 /*****************************************************************************/
1155 /* 602 specific instructions */
1156 /* mfrom is the craziest instruction ever seen, imho! */
1157 /* The real implementation uses a ROM table; do the same */
1158 #define USE_MFROM_ROM_TABLE
1159 void do_op_602_mfrom (void)
1160 {
1161 if (likely(T0 < 602)) {
1162 #if defined(USE_MFROM_ROM_TABLE)
1163 #include "mfrom_table.c"
1164 T0 = mfrom_ROM_table[T0];
1165 #else
1166 double d;
1167         /* Extremely decomposed:
1168          *   x = -T0 / 256
1169          *   T0 = 256 * log10(10^x + 1.0) + 0.5
1170          */
1171 d = T0;
1172 d = float64_div(d, 256, &env->fp_status);
1173 d = float64_chs(d);
1174 d = exp10(d); // XXX: use float emulation function
1175 d = float64_add(d, 1.0, &env->fp_status);
1176 d = log10(d); // XXX: use float emulation function
1177 d = float64_mul(d, 256, &env->fp_status);
1178 d = float64_add(d, 0.5, &env->fp_status);
1179 T0 = float64_round_to_int(d, &env->fp_status);
1180 #endif
1181 } else {
1182 T0 = 0;
1183 }
1184 }
1185
1186 /*****************************************************************************/
1187 /* Embedded PowerPC specific helpers */
1188 void do_405_check_ov (void)
1189 {
1190 if (likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1191 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1192 xer_ov = 0;
1193 } else {
1194 xer_ov = 1;
1195 xer_so = 1;
1196 }
1197 }
1198
1199 void do_405_check_sat (void)
1200 {
1201 if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1202 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1203 /* Saturate result */
1204 if (T2 >> 31) {
1205 T0 = INT32_MIN;
1206 } else {
1207 T0 = INT32_MAX;
1208 }
1209 }
1210 }
1211
1212 #if !defined(CONFIG_USER_ONLY)
1213 void do_40x_rfci (void)
1214 {
1215 env->nip = env->spr[SPR_40x_SRR2];
1216 do_store_msr(env, env->spr[SPR_40x_SRR3] & ~0xFFFF0000);
1217 #if defined (DEBUG_OP)
1218 cpu_dump_rfi(env->nip, do_load_msr(env));
1219 #endif
1220 env->interrupt_request = CPU_INTERRUPT_EXITTB;
1221 }
1222
1223 void do_rfci (void)
1224 {
1225 #if defined(TARGET_PPC64)
1226 if (env->spr[SPR_BOOKE_CSRR1] & (1 << MSR_CM)) {
1227 env->nip = (uint64_t)env->spr[SPR_BOOKE_CSRR0];
1228 } else
1229 #endif
1230 {
1231 env->nip = (uint32_t)env->spr[SPR_BOOKE_CSRR0];
1232 }
1233 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_CSRR1] & ~0x3FFF0000);
1234 #if defined (DEBUG_OP)
1235 cpu_dump_rfi(env->nip, do_load_msr(env));
1236 #endif
1237 env->interrupt_request = CPU_INTERRUPT_EXITTB;
1238 }
1239
1240 void do_rfdi (void)
1241 {
1242 #if defined(TARGET_PPC64)
1243 if (env->spr[SPR_BOOKE_DSRR1] & (1 << MSR_CM)) {
1244 env->nip = (uint64_t)env->spr[SPR_BOOKE_DSRR0];
1245 } else
1246 #endif
1247 {
1248 env->nip = (uint32_t)env->spr[SPR_BOOKE_DSRR0];
1249 }
1250 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_DSRR1] & ~0x3FFF0000);
1251 #if defined (DEBUG_OP)
1252 cpu_dump_rfi(env->nip, do_load_msr(env));
1253 #endif
1254 env->interrupt_request = CPU_INTERRUPT_EXITTB;
1255 }
1256
1257 void do_rfmci (void)
1258 {
1259 #if defined(TARGET_PPC64)
1260 if (env->spr[SPR_BOOKE_MCSRR1] & (1 << MSR_CM)) {
1261 env->nip = (uint64_t)env->spr[SPR_BOOKE_MCSRR0];
1262 } else
1263 #endif
1264 {
1265 env->nip = (uint32_t)env->spr[SPR_BOOKE_MCSRR0];
1266 }
1267 do_store_msr(env, (uint32_t)env->spr[SPR_BOOKE_MCSRR1] & ~0x3FFF0000);
1268 #if defined (DEBUG_OP)
1269 cpu_dump_rfi(env->nip, do_load_msr(env));
1270 #endif
1271 env->interrupt_request = CPU_INTERRUPT_EXITTB;
1272 }
1273
1274 void do_load_dcr (void)
1275 {
1276 target_ulong val;
1277
1278 if (unlikely(env->dcr_env == NULL)) {
1279 if (loglevel != 0) {
1280 fprintf(logfile, "No DCR environment\n");
1281 }
1282 do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_INVAL);
1283 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1284 if (loglevel != 0) {
1285 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1286 }
1287 do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_PRIV_REG);
1288 } else {
1289 T0 = val;
1290 }
1291 }
1292
1293 void do_store_dcr (void)
1294 {
1295 if (unlikely(env->dcr_env == NULL)) {
1296 if (loglevel != 0) {
1297 fprintf(logfile, "No DCR environment\n");
1298 }
1299 do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_INVAL);
1300 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1301 if (loglevel != 0) {
1302 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1303 }
1304 do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_PRIV_REG);
1305 }
1306 }
1307
1308 void do_load_403_pb (int num)
1309 {
1310 T0 = env->pb[num];
1311 }
1312
1313 void do_store_403_pb (int num)
1314 {
1315 if (likely(env->pb[num] != T0)) {
1316 env->pb[num] = T0;
1317 /* Should be optimized */
1318 tlb_flush(env, 1);
1319 }
1320 }
1321 #endif
1322
1323 /* 440 specific */
1324 void do_440_dlmzb (void)
1325 {
1326 target_ulong mask;
1327 int i;
1328
1329 i = 1;
1330 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1331 if ((T0 & mask) == 0)
1332 goto done;
1333 i++;
1334 }
1335 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1336 if ((T1 & mask) == 0)
1337 break;
1338 i++;
1339 }
1340 done:
1341 T0 = i;
1342 }
1343
1344 #if defined(TARGET_PPCEMB)
1345 /* SPE extension helpers */
1346 /* Use a table to make this quicker */
1347 static uint8_t hbrev[16] = {
1348 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1349 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1350 };
1351
1352 static inline uint8_t byte_reverse (uint8_t val)
1353 {
1354 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1355 }
1356
1357 static inline uint32_t word_reverse (uint32_t val)
1358 {
1359 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1360 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1361 }
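/* Illustrative example (not from the original file): hbrev[] reverses the
 * bits of one nibble, so byte_reverse(0x12) == 0x48 (00010010 -> 01001000),
 * and word_reverse() applies the same trick to all four bytes while also
 * swapping their order, giving a full 32-bit bit reversal.
 */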
1362
1363 #define MASKBITS 16 // Random value - to be fixed
1364 void do_brinc (void)
1365 {
1366 uint32_t a, b, d, mask;
1367
1368 mask = (uint32_t)(-1UL) >> MASKBITS;
1369 b = T1_64 & mask;
1370 a = T0_64 & mask;
1371 d = word_reverse(1 + word_reverse(a | ~mask));
1372 T0_64 = (T0_64 & ~mask) | (d & mask);
1373 }
1374
1375 #define DO_SPE_OP2(name) \
1376 void do_ev##name (void) \
1377 { \
1378 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1379 (uint64_t)_do_e##name(T0_64, T1_64); \
1380 }
1381
1382 #define DO_SPE_OP1(name) \
1383 void do_ev##name (void) \
1384 { \
1385 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1386 (uint64_t)_do_e##name(T0_64); \
1387 }
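/* For reference (hypothetical expansion, not in the original file):
 * DO_SPE_OP2(addw) defines
 *     void do_evaddw (void)
 *     {
 *         T0_64 = ((uint64_t)_do_eaddw(T0_64 >> 32, T1_64 >> 32) << 32) |
 *                 (uint64_t)_do_eaddw(T0_64, T1_64);
 *     }
 * i.e. the 64-bit SPE register is processed as two independent 32-bit
 * elements, one per half.
 */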
1388
1389 /* Fixed-point vector arithmetic */
1390 static inline uint32_t _do_eabs (uint32_t val)
1391 {
1392     if ((int32_t)val < 0 && val != 0x80000000)
1393         val = -(int32_t)val;
1394
1395 return val;
1396 }
1397
1398 static inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
1399 {
1400 return op1 + op2;
1401 }
1402
1403 static inline int _do_ecntlsw (uint32_t val)
1404 {
1405 if (val & 0x80000000)
1406 return _do_cntlzw(~val);
1407 else
1408 return _do_cntlzw(val);
1409 }
1410
1411 static inline int _do_ecntlzw (uint32_t val)
1412 {
1413 return _do_cntlzw(val);
1414 }
1415
1416 static inline uint32_t _do_eneg (uint32_t val)
1417 {
1418 if (val != 0x80000000)
1419         val = -(int32_t)val;
1420
1421 return val;
1422 }
1423
1424 static inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
1425 {
1426 return rotl32(op1, op2);
1427 }
1428
1429 static inline uint32_t _do_erndw (uint32_t val)
1430 {
1431     return (val + 0x00008000) & 0xFFFF0000;
1432 }
1433
1434 static inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
1435 {
1436 /* No error here: 6 bits are used */
1437 return op1 << (op2 & 0x3F);
1438 }
1439
1440 static inline int32_t _do_esrws (int32_t op1, uint32_t op2)
1441 {
1442 /* No error here: 6 bits are used */
1443 return op1 >> (op2 & 0x3F);
1444 }
1445
1446 static inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
1447 {
1448 /* No error here: 6 bits are used */
1449 return op1 >> (op2 & 0x3F);
1450 }
1451
1452 static inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
1453 {
1454 return op2 - op1;
1455 }
1456
1457 /* evabs */
1458 DO_SPE_OP1(abs);
1459 /* evaddw */
1460 DO_SPE_OP2(addw);
1461 /* evcntlsw */
1462 DO_SPE_OP1(cntlsw);
1463 /* evcntlzw */
1464 DO_SPE_OP1(cntlzw);
1465 /* evneg */
1466 DO_SPE_OP1(neg);
1467 /* evrlw */
1468 DO_SPE_OP2(rlw);
1469 /* evrnd */
1470 DO_SPE_OP1(rndw);
1471 /* evslw */
1472 DO_SPE_OP2(slw);
1473 /* evsrws */
1474 DO_SPE_OP2(srws);
1475 /* evsrwu */
1476 DO_SPE_OP2(srwu);
1477 /* evsubfw */
1478 DO_SPE_OP2(subfw);
1479
1480 /* evsel is a little bit more complicated... */
1481 static inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
1482 {
1483 if (n)
1484 return op1;
1485 else
1486 return op2;
1487 }
1488
1489 void do_evsel (void)
1490 {
1491 T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
1492 (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
1493 }
1494
1495 /* Fixed-point vector comparisons */
1496 #define DO_SPE_CMP(name) \
1497 void do_ev##name (void) \
1498 { \
1499     T0 = _do_evcmp_merge(_do_e##name(T0_64 >> 32,                            \
1500                                      T1_64 >> 32),                           \
1501                          _do_e##name(T0_64, T1_64));                         \
1502 }
1503
1504 static inline uint32_t _do_evcmp_merge (int t0, int t1)
1505 {
1506 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1507 }
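/* Illustrative example (not from the original file): the merged value is
 * the CR field written by the ev*cmp* instructions, laid out as
 *     bit 3: high-element result, bit 2: low-element result,
 *     bit 1: OR of both, bit 0: AND of both.
 * e.g. _do_evcmp_merge(1, 0) == 0xA and _do_evcmp_merge(1, 1) == 0xF.
 */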
1508 static inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
1509 {
1510 return op1 == op2 ? 1 : 0;
1511 }
1512
1513 static inline int _do_ecmpgts (int32_t op1, int32_t op2)
1514 {
1515 return op1 > op2 ? 1 : 0;
1516 }
1517
1518 static inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
1519 {
1520 return op1 > op2 ? 1 : 0;
1521 }
1522
1523 static inline int _do_ecmplts (int32_t op1, int32_t op2)
1524 {
1525 return op1 < op2 ? 1 : 0;
1526 }
1527
1528 static inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
1529 {
1530 return op1 < op2 ? 1 : 0;
1531 }
1532
1533 /* evcmpeq */
1534 DO_SPE_CMP(cmpeq);
1535 /* evcmpgts */
1536 DO_SPE_CMP(cmpgts);
1537 /* evcmpgtu */
1538 DO_SPE_CMP(cmpgtu);
1539 /* evcmplts */
1540 DO_SPE_CMP(cmplts);
1541 /* evcmpltu */
1542 DO_SPE_CMP(cmpltu);
1543
1544 /* Single precision floating-point conversions from/to integer */
1545 static inline uint32_t _do_efscfsi (int32_t val)
1546 {
1547 union {
1548 uint32_t u;
1549 float32 f;
1550 } u;
1551
1552 u.f = int32_to_float32(val, &env->spe_status);
1553
1554 return u.u;
1555 }
1556
1557 static inline uint32_t _do_efscfui (uint32_t val)
1558 {
1559 union {
1560 uint32_t u;
1561 float32 f;
1562 } u;
1563
1564 u.f = uint32_to_float32(val, &env->spe_status);
1565
1566 return u.u;
1567 }
1568
1569 static inline int32_t _do_efsctsi (uint32_t val)
1570 {
1571 union {
1572 int32_t u;
1573 float32 f;
1574 } u;
1575
1576 u.u = val;
1577     /* NaNs are not treated the same way IEEE 754 does */
1578 if (unlikely(isnan(u.f)))
1579 return 0;
1580
1581 return float32_to_int32(u.f, &env->spe_status);
1582 }
1583
1584 static inline uint32_t _do_efsctui (uint32_t val)
1585 {
1586 union {
1587 int32_t u;
1588 float32 f;
1589 } u;
1590
1591 u.u = val;
1592     /* NaNs are not treated the same way IEEE 754 does */
1593 if (unlikely(isnan(u.f)))
1594 return 0;
1595
1596 return float32_to_uint32(u.f, &env->spe_status);
1597 }
1598
1599 static inline int32_t _do_efsctsiz (uint32_t val)
1600 {
1601 union {
1602 int32_t u;
1603 float32 f;
1604 } u;
1605
1606 u.u = val;
1607     /* NaNs are not treated the same way IEEE 754 does */
1608 if (unlikely(isnan(u.f)))
1609 return 0;
1610
1611 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1612 }
1613
1614 static inline uint32_t _do_efsctuiz (uint32_t val)
1615 {
1616 union {
1617 int32_t u;
1618 float32 f;
1619 } u;
1620
1621 u.u = val;
1622     /* NaNs are not treated the same way IEEE 754 does */
1623 if (unlikely(isnan(u.f)))
1624 return 0;
1625
1626 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1627 }
1628
1629 void do_efscfsi (void)
1630 {
1631 T0_64 = _do_efscfsi(T0_64);
1632 }
1633
1634 void do_efscfui (void)
1635 {
1636 T0_64 = _do_efscfui(T0_64);
1637 }
1638
1639 void do_efsctsi (void)
1640 {
1641 T0_64 = _do_efsctsi(T0_64);
1642 }
1643
1644 void do_efsctui (void)
1645 {
1646 T0_64 = _do_efsctui(T0_64);
1647 }
1648
1649 void do_efsctsiz (void)
1650 {
1651 T0_64 = _do_efsctsiz(T0_64);
1652 }
1653
1654 void do_efsctuiz (void)
1655 {
1656 T0_64 = _do_efsctuiz(T0_64);
1657 }
1658
1659 /* Single precision floating-point conversion to/from fractional */
1660 static inline uint32_t _do_efscfsf (uint32_t val)
1661 {
1662 union {
1663 uint32_t u;
1664 float32 f;
1665 } u;
1666 float32 tmp;
1667
1668 u.f = int32_to_float32(val, &env->spe_status);
1669 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
1670 u.f = float32_div(u.f, tmp, &env->spe_status);
1671
1672 return u.u;
1673 }
1674
1675 static inline uint32_t _do_efscfuf (uint32_t val)
1676 {
1677 union {
1678 uint32_t u;
1679 float32 f;
1680 } u;
1681 float32 tmp;
1682
1683 u.f = uint32_to_float32(val, &env->spe_status);
1684 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1685 u.f = float32_div(u.f, tmp, &env->spe_status);
1686
1687 return u.u;
1688 }
1689
1690 static inline int32_t _do_efsctsf (uint32_t val)
1691 {
1692 union {
1693 int32_t u;
1694 float32 f;
1695 } u;
1696 float32 tmp;
1697
1698 u.u = val;
1699     /* NaNs are not treated the same way IEEE 754 does */
1700 if (unlikely(isnan(u.f)))
1701 return 0;
1702 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1703 u.f = float32_mul(u.f, tmp, &env->spe_status);
1704
1705 return float32_to_int32(u.f, &env->spe_status);
1706 }
1707
1708 static inline uint32_t _do_efsctuf (uint32_t val)
1709 {
1710 union {
1711 int32_t u;
1712 float32 f;
1713 } u;
1714 float32 tmp;
1715
1716 u.u = val;
1717     /* NaNs are not treated the same way IEEE 754 does */
1718 if (unlikely(isnan(u.f)))
1719 return 0;
1720 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1721 u.f = float32_mul(u.f, tmp, &env->spe_status);
1722
1723 return float32_to_uint32(u.f, &env->spe_status);
1724 }
1725
1726 static inline int32_t _do_efsctsfz (uint32_t val)
1727 {
1728 union {
1729 int32_t u;
1730 float32 f;
1731 } u;
1732 float32 tmp;
1733
1734 u.u = val;
1735     /* NaNs are not treated the same way IEEE 754 does */
1736 if (unlikely(isnan(u.f)))
1737 return 0;
1738 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1739 u.f = float32_mul(u.f, tmp, &env->spe_status);
1740
1741 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1742 }
1743
1744 static inline uint32_t _do_efsctufz (uint32_t val)
1745 {
1746 union {
1747 int32_t u;
1748 float32 f;
1749 } u;
1750 float32 tmp;
1751
1752 u.u = val;
1753     /* NaNs are not treated the same way IEEE 754 does */
1754 if (unlikely(isnan(u.f)))
1755 return 0;
1756 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
1757 u.f = float32_mul(u.f, tmp, &env->spe_status);
1758
1759 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1760 }
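/* Illustrative note (not from the original file): the *cfsf/*cfuf helpers
 * treat the 32-bit operand as a fraction and scale the converted value by
 * 2**-32, so _do_efscfuf(0x80000000) yields 0.5; the *ctsf/*ctuf helpers
 * apply the inverse scaling (multiply by 2**32) before converting back.
 */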
1761
1762 void do_efscfsf (void)
1763 {
1764 T0_64 = _do_efscfsf(T0_64);
1765 }
1766
1767 void do_efscfuf (void)
1768 {
1769 T0_64 = _do_efscfuf(T0_64);
1770 }
1771
1772 void do_efsctsf (void)
1773 {
1774 T0_64 = _do_efsctsf(T0_64);
1775 }
1776
1777 void do_efsctuf (void)
1778 {
1779 T0_64 = _do_efsctuf(T0_64);
1780 }
1781
1782 void do_efsctsfz (void)
1783 {
1784 T0_64 = _do_efsctsfz(T0_64);
1785 }
1786
1787 void do_efsctufz (void)
1788 {
1789 T0_64 = _do_efsctufz(T0_64);
1790 }
1791
1792 /* Double precision floating point helpers */
1793 static inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
1794 {
1795     /* XXX: TODO: test special values (NaN, infinities, ...) */
1796 return _do_efdtstlt(op1, op2);
1797 }
1798
1799 static inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
1800 {
1801     /* XXX: TODO: test special values (NaN, infinities, ...) */
1802 return _do_efdtstgt(op1, op2);
1803 }
1804
1805 static inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
1806 {
1807     /* XXX: TODO: test special values (NaN, infinities, ...) */
1808 return _do_efdtsteq(op1, op2);
1809 }
1810
1811 void do_efdcmplt (void)
1812 {
1813 T0 = _do_efdcmplt(T0_64, T1_64);
1814 }
1815
1816 void do_efdcmpgt (void)
1817 {
1818 T0 = _do_efdcmpgt(T0_64, T1_64);
1819 }
1820
1821 void do_efdcmpeq (void)
1822 {
1823 T0 = _do_efdcmpeq(T0_64, T1_64);
1824 }
1825
1826 /* Double precision floating-point conversion to/from integer */
1827 static inline uint64_t _do_efdcfsi (int64_t val)
1828 {
1829 union {
1830 uint64_t u;
1831 float64 f;
1832 } u;
1833
1834 u.f = int64_to_float64(val, &env->spe_status);
1835
1836 return u.u;
1837 }
1838
1839 static inline uint64_t _do_efdcfui (uint64_t val)
1840 {
1841 union {
1842 uint64_t u;
1843 float64 f;
1844 } u;
1845
1846 u.f = uint64_to_float64(val, &env->spe_status);
1847
1848 return u.u;
1849 }
1850
1851 static inline int64_t _do_efdctsi (uint64_t val)
1852 {
1853 union {
1854 int64_t u;
1855 float64 f;
1856 } u;
1857
1858 u.u = val;
1859     /* NaNs are not treated the same way IEEE 754 does */
1860 if (unlikely(isnan(u.f)))
1861 return 0;
1862
1863 return float64_to_int64(u.f, &env->spe_status);
1864 }
1865
1866 static inline uint64_t _do_efdctui (uint64_t val)
1867 {
1868 union {
1869 int64_t u;
1870 float64 f;
1871 } u;
1872
1873 u.u = val;
1874     /* NaNs are not treated the same way IEEE 754 does */
1875 if (unlikely(isnan(u.f)))
1876 return 0;
1877
1878 return float64_to_uint64(u.f, &env->spe_status);
1879 }
1880
1881 static inline int64_t _do_efdctsiz (uint64_t val)
1882 {
1883 union {
1884 int64_t u;
1885 float64 f;
1886 } u;
1887
1888 u.u = val;
1889     /* NaNs are not treated the same way IEEE 754 does */
1890 if (unlikely(isnan(u.f)))
1891 return 0;
1892
1893 return float64_to_int64_round_to_zero(u.f, &env->spe_status);
1894 }
1895
1896 static inline uint64_t _do_efdctuiz (uint64_t val)
1897 {
1898 union {
1899 int64_t u;
1900 float64 f;
1901 } u;
1902
1903 u.u = val;
1904     /* NaNs are not treated the same way IEEE 754 does */
1905 if (unlikely(isnan(u.f)))
1906 return 0;
1907
1908 return float64_to_uint64_round_to_zero(u.f, &env->spe_status);
1909 }
1910
1911 void do_efdcfsi (void)
1912 {
1913 T0_64 = _do_efdcfsi(T0_64);
1914 }
1915
1916 void do_efdcfui (void)
1917 {
1918 T0_64 = _do_efdcfui(T0_64);
1919 }
1920
1921 void do_efdctsi (void)
1922 {
1923 T0_64 = _do_efdctsi(T0_64);
1924 }
1925
1926 void do_efdctui (void)
1927 {
1928 T0_64 = _do_efdctui(T0_64);
1929 }
1930
1931 void do_efdctsiz (void)
1932 {
1933 T0_64 = _do_efdctsiz(T0_64);
1934 }
1935
1936 void do_efdctuiz (void)
1937 {
1938 T0_64 = _do_efdctuiz(T0_64);
1939 }
1940
1941 /* Double precision floating-point conversion to/from fractional */
1942 static inline uint64_t _do_efdcfsf (int64_t val)
1943 {
1944 union {
1945 uint64_t u;
1946 float64 f;
1947 } u;
1948 float64 tmp;
1949
1950 u.f = int32_to_float64(val, &env->spe_status);
1951 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
1952 u.f = float64_div(u.f, tmp, &env->spe_status);
1953
1954 return u.u;
1955 }
1956
1957 static inline uint64_t _do_efdcfuf (uint64_t val)
1958 {
1959 union {
1960 uint64_t u;
1961 float64 f;
1962 } u;
1963 float64 tmp;
1964
1965 u.f = uint32_to_float64(val, &env->spe_status);
1966 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
1967 u.f = float64_div(u.f, tmp, &env->spe_status);
1968
1969 return u.u;
1970 }
1971
1972 static inline int64_t _do_efdctsf (uint64_t val)
1973 {
1974 union {
1975 int64_t u;
1976 float64 f;
1977 } u;
1978 float64 tmp;
1979
1980 u.u = val;
1981     /* NaNs are not treated the same way IEEE 754 does */
1982 if (unlikely(isnan(u.f)))
1983 return 0;
1984 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
1985 u.f = float64_mul(u.f, tmp, &env->spe_status);
1986
1987 return float64_to_int32(u.f, &env->spe_status);
1988 }
1989
1990 static inline uint64_t _do_efdctuf (uint64_t val)
1991 {
1992 union {
1993 int64_t u;
1994 float64 f;
1995 } u;
1996 float64 tmp;
1997
1998 u.u = val;
1999     /* NaNs are not treated the same way IEEE 754 does */
2000 if (unlikely(isnan(u.f)))
2001 return 0;
2002 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2003 u.f = float64_mul(u.f, tmp, &env->spe_status);
2004
2005 return float64_to_uint32(u.f, &env->spe_status);
2006 }
2007
2008 static inline int64_t _do_efdctsfz (uint64_t val)
2009 {
2010 union {
2011 int64_t u;
2012 float64 f;
2013 } u;
2014 float64 tmp;
2015
2016 u.u = val;
2017     /* NaNs are not treated the same way IEEE 754 does */
2018 if (unlikely(isnan(u.f)))
2019 return 0;
2020 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2021 u.f = float64_mul(u.f, tmp, &env->spe_status);
2022
2023 return float64_to_int32_round_to_zero(u.f, &env->spe_status);
2024 }
2025
2026 static inline uint64_t _do_efdctufz (uint64_t val)
2027 {
2028 union {
2029 int64_t u;
2030 float64 f;
2031 } u;
2032 float64 tmp;
2033
2034 u.u = val;
2035     /* NaNs are not treated the same way IEEE 754 does */
2036 if (unlikely(isnan(u.f)))
2037 return 0;
2038 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2039 u.f = float64_mul(u.f, tmp, &env->spe_status);
2040
2041 return float64_to_uint32_round_to_zero(u.f, &env->spe_status);
2042 }
2043
2044 void do_efdcfsf (void)
2045 {
2046 T0_64 = _do_efdcfsf(T0_64);
2047 }
2048
2049 void do_efdcfuf (void)
2050 {
2051 T0_64 = _do_efdcfuf(T0_64);
2052 }
2053
2054 void do_efdctsf (void)
2055 {
2056 T0_64 = _do_efdctsf(T0_64);
2057 }
2058
2059 void do_efdctuf (void)
2060 {
2061 T0_64 = _do_efdctuf(T0_64);
2062 }
2063
2064 void do_efdctsfz (void)
2065 {
2066 T0_64 = _do_efdctsfz(T0_64);
2067 }
2068
2069 void do_efdctufz (void)
2070 {
2071 T0_64 = _do_efdctufz(T0_64);
2072 }
2073
2074 /* Floating point conversion between single and double precision */
2075 static inline uint32_t _do_efscfd (uint64_t val)
2076 {
2077 union {
2078 uint64_t u;
2079 float64 f;
2080 } u1;
2081 union {
2082 uint32_t u;
2083 float32 f;
2084 } u2;
2085
2086 u1.u = val;
2087 u2.f = float64_to_float32(u1.f, &env->spe_status);
2088
2089 return u2.u;
2090 }
2091
2092 static inline uint64_t _do_efdcfs (uint32_t val)
2093 {
2094 union {
2095 uint64_t u;
2096 float64 f;
2097 } u2;
2098 union {
2099 uint32_t u;
2100 float32 f;
2101 } u1;
2102
2103 u1.u = val;
2104 u2.f = float32_to_float64(u1.f, &env->spe_status);
2105
2106 return u2.u;
2107 }
2108
2109 void do_efscfd (void)
2110 {
2111 T0_64 = _do_efscfd(T0_64);
2112 }
2113
2114 void do_efdcfs (void)
2115 {
2116 T0_64 = _do_efdcfs(T0_64);
2117 }
2118
2119 /* Single-precision floating-point vector arithmetic */
2120 /* evfsabs */
2121 DO_SPE_OP1(fsabs);
2122 /* evfsnabs */
2123 DO_SPE_OP1(fsnabs);
2124 /* evfsneg */
2125 DO_SPE_OP1(fsneg);
2126 /* evfsadd */
2127 DO_SPE_OP2(fsadd);
2128 /* evfssub */
2129 DO_SPE_OP2(fssub);
2130 /* evfsmul */
2131 DO_SPE_OP2(fsmul);
2132 /* evfsdiv */
2133 DO_SPE_OP2(fsdiv);
2134
2135 /* Single-precision floating-point comparisons */
2136 static inline int _do_efscmplt (uint32_t op1, uint32_t op2)
2137 {
2138     /* XXX: TODO: test special values (NaN, infinities, ...) */
2139 return _do_efststlt(op1, op2);
2140 }
2141
2142 static inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
2143 {
2144     /* XXX: TODO: test special values (NaN, infinities, ...) */
2145 return _do_efststgt(op1, op2);
2146 }
2147
2148 static inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
2149 {
2150     /* XXX: TODO: test special values (NaN, infinities, ...) */
2151 return _do_efststeq(op1, op2);
2152 }
2153
2154 void do_efscmplt (void)
2155 {
2156 T0 = _do_efscmplt(T0_64, T1_64);
2157 }
2158
2159 void do_efscmpgt (void)
2160 {
2161 T0 = _do_efscmpgt(T0_64, T1_64);
2162 }
2163
2164 void do_efscmpeq (void)
2165 {
2166 T0 = _do_efscmpeq(T0_64, T1_64);
2167 }
2168
2169 /* Single-precision floating-point vector comparisons */
2170 /* evfscmplt */
2171 DO_SPE_CMP(fscmplt);
2172 /* evfscmpgt */
2173 DO_SPE_CMP(fscmpgt);
2174 /* evfscmpeq */
2175 DO_SPE_CMP(fscmpeq);
2176 /* evfststlt */
2177 DO_SPE_CMP(fststlt);
2178 /* evfststgt */
2179 DO_SPE_CMP(fststgt);
2180 /* evfststeq */
2181 DO_SPE_CMP(fststeq);
2182
2183 /* Single-precision floating-point vector conversions */
2184 /* evfscfsi */
2185 DO_SPE_OP1(fscfsi);
2186 /* evfscfui */
2187 DO_SPE_OP1(fscfui);
2188 /* evfscfuf */
2189 DO_SPE_OP1(fscfuf);
2190 /* evfscfsf */
2191 DO_SPE_OP1(fscfsf);
2192 /* evfsctsi */
2193 DO_SPE_OP1(fsctsi);
2194 /* evfsctui */
2195 DO_SPE_OP1(fsctui);
2196 /* evfsctsiz */
2197 DO_SPE_OP1(fsctsiz);
2198 /* evfsctuiz */
2199 DO_SPE_OP1(fsctuiz);
2200 /* evfsctsf */
2201 DO_SPE_OP1(fsctsf);
2202 /* evfsctuf */
2203 DO_SPE_OP1(fsctuf);
2204 #endif /* defined(TARGET_PPCEMB) */
2205
2206 /*****************************************************************************/
2207 /* Softmmu support */
2208 #if !defined (CONFIG_USER_ONLY)
2209
2210 #define MMUSUFFIX _mmu
2211 #define GETPC() (__builtin_return_address(0))
2212
2213 #define SHIFT 0
2214 #include "softmmu_template.h"
2215
2216 #define SHIFT 1
2217 #include "softmmu_template.h"
2218
2219 #define SHIFT 2
2220 #include "softmmu_template.h"
2221
2222 #define SHIFT 3
2223 #include "softmmu_template.h"
2224
2225 /* try to fill the TLB and raise an exception on error. If retaddr is
2226    NULL, it means that the function was called from C code (i.e. not
2227 from generated code or from helper.c) */
2228 /* XXX: fix it to restore all registers */
2229 void tlb_fill (target_ulong addr, int is_write, int is_user, void *retaddr)
2230 {
2231 TranslationBlock *tb;
2232 CPUState *saved_env;
2233 target_phys_addr_t pc;
2234 int ret;
2235
2236 /* XXX: hack to restore env in all cases, even if not called from
2237 generated code */
2238 saved_env = env;
2239 env = cpu_single_env;
2240 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, is_user, 1);
2241 if (unlikely(ret != 0)) {
2242 if (likely(retaddr)) {
2243 /* now we have a real cpu fault */
2244 pc = (target_phys_addr_t)retaddr;
2245 tb = tb_find_pc(pc);
2246 if (likely(tb)) {
2247 /* the PC is inside the translated code. It means that we have
2248 a virtual CPU fault */
2249 cpu_restore_state(tb, env, pc, NULL);
2250 }
2251 }
2252 do_raise_exception_err(env->exception_index, env->error_code);
2253 }
2254 env = saved_env;
2255 }
2256
2257 /* TLB invalidation helpers */
2258 void do_tlbia (void)
2259 {
2260 ppc_tlb_invalidate_all(env);
2261 }
2262
2263 void do_tlbie (void)
2264 {
2265 T0 = (uint32_t)T0;
2266 #if !defined(FLUSH_ALL_TLBS)
2267 if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
2268 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
2269 if (env->id_tlbs == 1)
2270 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
2271 } else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) {
2272 /* XXX: TODO */
2273 #if 0
2274 ppcbooke_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
2275 env->spr[SPR_BOOKE_PID]);
2276 #endif
2277 } else {
2278         /* tlbie invalidates TLB entries for all segments */
2279 T0 &= TARGET_PAGE_MASK;
2280 T0 &= ~((target_ulong)-1 << 28);
2281 /* XXX: this case should be optimized,
2282 * giving a mask to tlb_flush_page
2283 */
2284 tlb_flush_page(env, T0 | (0x0 << 28));
2285 tlb_flush_page(env, T0 | (0x1 << 28));
2286 tlb_flush_page(env, T0 | (0x2 << 28));
2287 tlb_flush_page(env, T0 | (0x3 << 28));
2288 tlb_flush_page(env, T0 | (0x4 << 28));
2289 tlb_flush_page(env, T0 | (0x5 << 28));
2290 tlb_flush_page(env, T0 | (0x6 << 28));
2291 tlb_flush_page(env, T0 | (0x7 << 28));
2292 tlb_flush_page(env, T0 | (0x8 << 28));
2293 tlb_flush_page(env, T0 | (0x9 << 28));
2294 tlb_flush_page(env, T0 | (0xA << 28));
2295 tlb_flush_page(env, T0 | (0xB << 28));
2296 tlb_flush_page(env, T0 | (0xC << 28));
2297 tlb_flush_page(env, T0 | (0xD << 28));
2298 tlb_flush_page(env, T0 | (0xE << 28));
2299 tlb_flush_page(env, T0 | (0xF << 28));
2300 }
2301 #else
2302 do_tlbia();
2303 #endif
2304 }
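/* Hypothetical helper (not in the original file): the sixteen unrolled
 * tlb_flush_page() calls above flush the page once for every possible
 * value of the four most significant EA bits, i.e. once per 256MB segment.
 * The same effect written as a loop:
 */
static inline void tlbie_flush_all_segments (target_ulong masked_ea)
{
    int sr;

    /* masked_ea is assumed to have its top four bits already cleared */
    for (sr = 0; sr < 16; sr++)
        tlb_flush_page(env, masked_ea | ((target_ulong)sr << 28));
}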
2305
2306 #if defined(TARGET_PPC64)
2307 void do_tlbie_64 (void)
2308 {
2309 T0 = (uint64_t)T0;
2310 #if !defined(FLUSH_ALL_TLBS)
2311 if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_6xx)) {
2312 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 0);
2313 if (env->id_tlbs == 1)
2314 ppc6xx_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK, 1);
2315 } else if (unlikely(PPC_MMU(env) == PPC_FLAGS_MMU_SOFT_4xx)) {
2316 /* XXX: TODO */
2317 #if 0
2318 ppcbooke_tlb_invalidate_virt(env, T0 & TARGET_PAGE_MASK,
2319 env->spr[SPR_BOOKE_PID]);
2320 #endif
2321 } else {
2322         /* tlbie invalidates TLB entries for all segments.
2323          * As there are 2^36 segments, invalidate all qemu TLBs
2324 */
2325 #if 0
2326 T0 &= TARGET_PAGE_MASK;
2327 T0 &= ~((target_ulong)-1 << 28);
2328 /* XXX: this case should be optimized,
2329 * giving a mask to tlb_flush_page
2330 */
2331 tlb_flush_page(env, T0 | (0x0 << 28));
2332 tlb_flush_page(env, T0 | (0x1 << 28));
2333 tlb_flush_page(env, T0 | (0x2 << 28));
2334 tlb_flush_page(env, T0 | (0x3 << 28));
2335 tlb_flush_page(env, T0 | (0x4 << 28));
2336 tlb_flush_page(env, T0 | (0x5 << 28));
2337 tlb_flush_page(env, T0 | (0x6 << 28));
2338 tlb_flush_page(env, T0 | (0x7 << 28));
2339 tlb_flush_page(env, T0 | (0x8 << 28));
2340 tlb_flush_page(env, T0 | (0x9 << 28));
2341 tlb_flush_page(env, T0 | (0xA << 28));
2342 tlb_flush_page(env, T0 | (0xB << 28));
2343 tlb_flush_page(env, T0 | (0xC << 28));
2344 tlb_flush_page(env, T0 | (0xD << 28));
2345 tlb_flush_page(env, T0 | (0xE << 28));
2346 tlb_flush_page(env, T0 | (0xF << 28));
2347 #else
2348 tlb_flush(env, 1);
2349 #endif
2350 }
2351 #else
2352 do_tlbia();
2353 #endif
2354 }
2355 #endif
2356
2357 #if defined(TARGET_PPC64)
2358 void do_slbia (void)
2359 {
2360 /* XXX: TODO */
2361 tlb_flush(env, 1);
2362 }
2363
2364 void do_slbie (void)
2365 {
2366 /* XXX: TODO */
2367 tlb_flush(env, 1);
2368 }
2369 #endif
2370
2371 /* Software-driven TLB management */
2372 /* PowerPC 602/603 software TLB load instruction helpers */
2373 void do_load_6xx_tlb (int is_code)
2374 {
2375 target_ulong RPN, CMP, EPN;
2376 int way;
2377
2378 RPN = env->spr[SPR_RPA];
2379 if (is_code) {
2380 CMP = env->spr[SPR_ICMP];
2381 EPN = env->spr[SPR_IMISS];
2382 } else {
2383 CMP = env->spr[SPR_DCMP];
2384 EPN = env->spr[SPR_DMISS];
2385 }
2386 way = (env->spr[SPR_SRR1] >> 17) & 1;
2387 #if defined (DEBUG_SOFTWARE_TLB)
2388 if (loglevel != 0) {
2389 fprintf(logfile, "%s: EPN %08lx %08lx PTE0 %08lx PTE1 %08lx way %d\n",
2390 __func__, (unsigned long)T0, (unsigned long)EPN,
2391 (unsigned long)CMP, (unsigned long)RPN, way);
2392 }
2393 #endif
2394 /* Store this TLB */
2395 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2396 way, is_code, CMP, RPN);
2397 }
2398
2399 static target_ulong booke_tlb_to_page_size (int size)
2400 {
2401 return 1024 << (2 * size);
2402 }
2403
2404 static int booke_page_size_to_tlb (target_ulong page_size)
2405 {
2406 int size;
2407
2408 switch (page_size) {
2409 case 0x00000400UL:
2410 size = 0x0;
2411 break;
2412 case 0x00001000UL:
2413 size = 0x1;
2414 break;
2415 case 0x00004000UL:
2416 size = 0x2;
2417 break;
2418 case 0x00010000UL:
2419 size = 0x3;
2420 break;
2421 case 0x00040000UL:
2422 size = 0x4;
2423 break;
2424 case 0x00100000UL:
2425 size = 0x5;
2426 break;
2427 case 0x00400000UL:
2428 size = 0x6;
2429 break;
2430 case 0x01000000UL:
2431 size = 0x7;
2432 break;
2433 case 0x04000000UL:
2434 size = 0x8;
2435 break;
2436 case 0x10000000UL:
2437 size = 0x9;
2438 break;
2439 case 0x40000000UL:
2440 size = 0xA;
2441 break;
2442 #if defined (TARGET_PPC64)
2443 case 0x000100000000ULL:
2444 size = 0xB;
2445 break;
2446 case 0x000400000000ULL:
2447 size = 0xC;
2448 break;
2449 case 0x001000000000ULL:
2450 size = 0xD;
2451 break;
2452 case 0x004000000000ULL:
2453 size = 0xE;
2454 break;
2455 case 0x010000000000ULL:
2456 size = 0xF;
2457 break;
2458 #endif
2459 default:
2460 size = -1;
2461 break;
2462 }
2463
2464 return size;
2465 }
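/* Illustrative example (not from the original file): the BookE/40x SIZE
 * field encodes the page size as 1KB * 4^SIZE, so the two helpers above
 * are exact inverses for supported sizes, e.g.
 *     booke_tlb_to_page_size(3)       == 0x10000   (64KB)
 *     booke_page_size_to_tlb(0x10000) == 3
 */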
2466
2467 /* Helpers for 4xx TLB management */
2468 void do_4xx_tlbre_lo (void)
2469 {
2470 ppcemb_tlb_t *tlb;
2471 int size;
2472
2473 T0 &= 0x3F;
2474 tlb = &env->tlb[T0].tlbe;
2475 T0 = tlb->EPN;
2476 if (tlb->prot & PAGE_VALID)
2477 T0 |= 0x400;
2478 size = booke_page_size_to_tlb(tlb->size);
2479 if (size < 0 || size > 0x7)
2480 size = 1;
2481 T0 |= size << 7;
2482 env->spr[SPR_40x_PID] = tlb->PID;
2483 }
2484
2485 void do_4xx_tlbre_hi (void)
2486 {
2487 ppcemb_tlb_t *tlb;
2488
2489 T0 &= 0x3F;
2490 tlb = &env->tlb[T0].tlbe;
2491 T0 = tlb->RPN;
2492 if (tlb->prot & PAGE_EXEC)
2493 T0 |= 0x200;
2494 if (tlb->prot & PAGE_WRITE)
2495 T0 |= 0x100;
2496 }
2497
2498 void do_4xx_tlbsx (void)
2499 {
2500 T0 = ppcemb_tlb_search(env, T0);
2501 }
2502
2503 void do_4xx_tlbsx_ (void)
2504 {
2505     int tmp = xer_so;
2506
2507 T0 = ppcemb_tlb_search(env, T0);
2508 if (T0 != -1)
2509 tmp |= 0x02;
2510 env->crf[0] = tmp;
2511 }
2512
2513 void do_4xx_tlbwe_hi (void)
2514 {
2515 ppcemb_tlb_t *tlb;
2516 target_ulong page, end;
2517
2518 #if defined (DEBUG_SOFTWARE_TLB)
2519 if (loglevel != 0) {
2520 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
2521 }
2522 #endif
2523 T0 &= 0x3F;
2524 tlb = &env->tlb[T0].tlbe;
2525 /* Invalidate previous TLB (if it's valid) */
2526 if (tlb->prot & PAGE_VALID) {
2527 end = tlb->EPN + tlb->size;
2528 #if defined (DEBUG_SOFTWARE_TLB)
2529 if (loglevel != 0) {
2530 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2531 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2532 }
2533 #endif
2534 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2535 tlb_flush_page(env, page);
2536 }
2537 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2538 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2539 * If this ever occurs, one should use the ppcemb target instead
2540 * of the ppc or ppc64 one
2541 */
2542 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2543         cpu_abort(env, "TLB size %u < %u is not supported (%d)\n",
2544 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2545 }
2546 tlb->EPN = (T1 & 0xFFFFFC00) & ~(tlb->size - 1);
2547 if (T1 & 0x40)
2548 tlb->prot |= PAGE_VALID;
2549 else
2550 tlb->prot &= ~PAGE_VALID;
2551 if (T1 & 0x20) {
2552 /* XXX: TO BE FIXED */
2553         cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
2554 }
2555 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2556 tlb->attr = T1 & 0xFF;
2557 #if defined (DEBUG_SOFTWARE_TLB)
2558 if (loglevel != 0) {
2559 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2560 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2561 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2562 tlb->prot & PAGE_READ ? 'r' : '-',
2563 tlb->prot & PAGE_WRITE ? 'w' : '-',
2564 tlb->prot & PAGE_EXEC ? 'x' : '-',
2565 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2566 }
2567 #endif
2568 /* Invalidate new TLB (if valid) */
2569 if (tlb->prot & PAGE_VALID) {
2570 end = tlb->EPN + tlb->size;
2571 #if defined (DEBUG_SOFTWARE_TLB)
2572 if (loglevel != 0) {
2573 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2574 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2575 }
2576 #endif
2577 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2578 tlb_flush_page(env, page);
2579 }
2580 }
2581
2582 void do_4xx_tlbwe_lo (void)
2583 {
2584 ppcemb_tlb_t *tlb;
2585
2586 #if defined (DEBUG_SOFTWARE_TLB)
2587 if (loglevel != 0) {
2588 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
2589 }
2590 #endif
2591 T0 &= 0x3F;
2592 tlb = &env->tlb[T0].tlbe;
2593 tlb->RPN = T1 & 0xFFFFFC00;
2594 tlb->prot = PAGE_READ;
2595 if (T1 & 0x200)
2596 tlb->prot |= PAGE_EXEC;
2597 if (T1 & 0x100)
2598 tlb->prot |= PAGE_WRITE;
2599 #if defined (DEBUG_SOFTWARE_TLB)
2600 if (loglevel != 0) {
2601 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2602 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2603 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2604 tlb->prot & PAGE_READ ? 'r' : '-',
2605 tlb->prot & PAGE_WRITE ? 'w' : '-',
2606 tlb->prot & PAGE_EXEC ? 'x' : '-',
2607 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2608 }
2609 #endif
2610 }
2611 #endif /* !CONFIG_USER_ONLY */