1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21 #include "host-utils.h"
22
23 #include "helper_regs.h"
24 #include "op_helper.h"
25
26 #define MEMSUFFIX _raw
27 #include "op_helper.h"
28 #include "op_helper_mem.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #define MEMSUFFIX _user
31 #include "op_helper.h"
32 #include "op_helper_mem.h"
33 #define MEMSUFFIX _kernel
34 #include "op_helper.h"
35 #include "op_helper_mem.h"
36 #if defined(TARGET_PPC64H)
37 #define MEMSUFFIX _hypv
38 #include "op_helper.h"
39 #include "op_helper_mem.h"
40 #endif
41 #endif
42
43 //#define DEBUG_OP
44 //#define DEBUG_EXCEPTIONS
45 //#define DEBUG_SOFTWARE_TLB
46
47 /*****************************************************************************/
48 /* Exceptions processing helpers */
49
50 void do_raise_exception_err (uint32_t exception, int error_code)
51 {
52 #if 0
53 printf("Raise exception %3x code : %d\n", exception, error_code);
54 #endif
55 env->exception_index = exception;
56 env->error_code = error_code;
57 cpu_loop_exit();
58 }
59
60 void do_raise_exception (uint32_t exception)
61 {
62 do_raise_exception_err(exception, 0);
63 }
64
65 void cpu_dump_EA (target_ulong EA);
66 void do_print_mem_EA (target_ulong EA)
67 {
68 cpu_dump_EA(EA);
69 }
70
71 /*****************************************************************************/
72 /* Registers load and stores */
73 void do_load_cr (void)
74 {
75 T0 = (env->crf[0] << 28) |
76 (env->crf[1] << 24) |
77 (env->crf[2] << 20) |
78 (env->crf[3] << 16) |
79 (env->crf[4] << 12) |
80 (env->crf[5] << 8) |
81 (env->crf[6] << 4) |
82 (env->crf[7] << 0);
83 }
84
85 void do_store_cr (uint32_t mask)
86 {
87 int i, sh;
88
89 for (i = 0, sh = 7; i < 8; i++, sh--) {
90 if (mask & (1 << sh))
91 env->crf[i] = (T0 >> (sh * 4)) & 0xFUL;
92 }
93 }
94
95 #if defined(TARGET_PPC64)
96 void do_store_pri (int prio)
97 {
98 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
99 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
100 }
101 #endif
102
103 target_ulong ppc_load_dump_spr (int sprn)
104 {
105 if (loglevel != 0) {
106 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
107 sprn, sprn, env->spr[sprn]);
108 }
109
110 return env->spr[sprn];
111 }
112
113 void ppc_store_dump_spr (int sprn, target_ulong val)
114 {
115 if (loglevel != 0) {
116 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
117 sprn, sprn, env->spr[sprn], val);
118 }
119 env->spr[sprn] = val;
120 }
121
122 /*****************************************************************************/
123 /* Fixed point operations helpers */
124 void do_adde (void)
125 {
126 T2 = T0;
127 T0 += T1 + xer_ca;
128 if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
129 (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
130 xer_ca = 0;
131 } else {
132 xer_ca = 1;
133 }
134 }
135
136 #if defined(TARGET_PPC64)
137 void do_adde_64 (void)
138 {
139 T2 = T0;
140 T0 += T1 + xer_ca;
141 if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
142 (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
143 xer_ca = 0;
144 } else {
145 xer_ca = 1;
146 }
147 }
148 #endif
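
/* Illustrative sketch, for clarity only (hypothetical helper, not used
 * elsewhere): the carry test in do_adde/do_adde_64 above can be read as
 * "carry out iff the truncated sum wrapped below the first operand, or
 * landed exactly on it while a carry came in". A standalone 32-bit version:
 */
static always_inline int example_adde_carry_out (uint32_t a, uint32_t b, int ca)
{
    uint32_t res = a + b + ca;

    /* e.g. a = 0xFFFFFFFF, b = 0x00000000, ca = 1: res = 0 < a => carry = 1 */
    return (res < a) || (ca && res == a);
}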
149
150 void do_addmeo (void)
151 {
152 T1 = T0;
153 T0 += xer_ca + (-1);
154 if (likely(!((uint32_t)T1 &
155 ((uint32_t)T1 ^ (uint32_t)T0) & (1UL << 31)))) {
156 xer_ov = 0;
157 } else {
158 xer_ov = 1;
159 xer_so = 1;
160 }
161 if (likely(T1 != 0))
162 xer_ca = 1;
163 }
164
165 #if defined(TARGET_PPC64)
166 void do_addmeo_64 (void)
167 {
168 T1 = T0;
169 T0 += xer_ca + (-1);
170 if (likely(!((uint64_t)T1 &
171 ((uint64_t)T1 ^ (uint64_t)T0) & (1ULL << 63)))) {
172 xer_ov = 0;
173 } else {
174 xer_ov = 1;
175 xer_so = 1;
176 }
177 if (likely(T1 != 0))
178 xer_ca = 1;
179 }
180 #endif
181
182 void do_divwo (void)
183 {
184 if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) ||
185 (int32_t)T1 == 0))) {
186 xer_ov = 0;
187 T0 = (int32_t)T0 / (int32_t)T1;
188 } else {
189 xer_ov = 1;
190 xer_so = 1;
191 T0 = (-1) * ((uint32_t)T0 >> 31);
192 }
193 }
194
195 #if defined(TARGET_PPC64)
196 void do_divdo (void)
197 {
198 if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == -1ULL) ||
199 (int64_t)T1 == 0))) {
200 xer_ov = 0;
201 T0 = (int64_t)T0 / (int64_t)T1;
202 } else {
203 xer_ov = 1;
204 xer_so = 1;
205 T0 = (-1ULL) * ((uint64_t)T0 >> 63);
206 }
207 }
208 #endif
209
210 void do_divwuo (void)
211 {
212 if (likely((uint32_t)T1 != 0)) {
213 xer_ov = 0;
214 T0 = (uint32_t)T0 / (uint32_t)T1;
215 } else {
216 xer_ov = 1;
217 xer_so = 1;
218 T0 = 0;
219 }
220 }
221
222 #if defined(TARGET_PPC64)
223 void do_divduo (void)
224 {
225 if (likely((uint64_t)T1 != 0)) {
226 xer_ov = 0;
227 T0 = (uint64_t)T0 / (uint64_t)T1;
228 } else {
229 xer_ov = 1;
230 xer_so = 1;
231 T0 = 0;
232 }
233 }
234 #endif
235
236 void do_mullwo (void)
237 {
238 int64_t res = (int64_t)T0 * (int64_t)T1;
239
240 if (likely((int32_t)res == res)) {
241 xer_ov = 0;
242 } else {
243 xer_ov = 1;
244 xer_so = 1;
245 }
246 T0 = (int32_t)res;
247 }
248
249 #if defined(TARGET_PPC64)
250 void do_mulldo (void)
251 {
252 int64_t th;
253 uint64_t tl;
254
255 muls64(&tl, &th, T0, T1);
256 /* If th != 0 && th != -1, then we had an overflow */
257         if (likely((uint64_t)(th + 1) <= 1)) {
258 xer_ov = 0;
259 } else {
260 xer_ov = 1;
261 xer_so = 1;
262 }
263 T0 = (int64_t)tl;
264 }
265 #endif
266
267 void do_nego (void)
268 {
269 if (likely((int32_t)T0 != INT32_MIN)) {
270 xer_ov = 0;
271 T0 = -(int32_t)T0;
272 } else {
273 xer_ov = 1;
274 xer_so = 1;
275 }
276 }
277
278 #if defined(TARGET_PPC64)
279 void do_nego_64 (void)
280 {
281 if (likely((int64_t)T0 != INT64_MIN)) {
282 xer_ov = 0;
283 T0 = -(int64_t)T0;
284 } else {
285 xer_ov = 1;
286 xer_so = 1;
287 }
288 }
289 #endif
290
291 void do_subfe (void)
292 {
293 T0 = T1 + ~T0 + xer_ca;
294 if (likely((uint32_t)T0 >= (uint32_t)T1 &&
295 (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
296 xer_ca = 0;
297 } else {
298 xer_ca = 1;
299 }
300 }
301
302 #if defined(TARGET_PPC64)
303 void do_subfe_64 (void)
304 {
305 T0 = T1 + ~T0 + xer_ca;
306 if (likely((uint64_t)T0 >= (uint64_t)T1 &&
307 (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
308 xer_ca = 0;
309 } else {
310 xer_ca = 1;
311 }
312 }
313 #endif
314
315 void do_subfmeo (void)
316 {
317 T1 = T0;
318 T0 = ~T0 + xer_ca - 1;
319 if (likely(!((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0) &
320 (1UL << 31)))) {
321 xer_ov = 0;
322 } else {
323 xer_ov = 1;
324 xer_so = 1;
325 }
326 if (likely((uint32_t)T1 != UINT32_MAX))
327 xer_ca = 1;
328 }
329
330 #if defined(TARGET_PPC64)
331 void do_subfmeo_64 (void)
332 {
333 T1 = T0;
334 T0 = ~T0 + xer_ca - 1;
335 if (likely(!((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0) &
336 (1ULL << 63)))) {
337 xer_ov = 0;
338 } else {
339 xer_ov = 1;
340 xer_so = 1;
341 }
342 if (likely((uint64_t)T1 != UINT64_MAX))
343 xer_ca = 1;
344 }
345 #endif
346
347 void do_subfzeo (void)
348 {
349 T1 = T0;
350 T0 = ~T0 + xer_ca;
351 if (likely(!(((uint32_t)~T1 ^ UINT32_MAX) &
352 ((uint32_t)(~T1) ^ (uint32_t)T0) & (1UL << 31)))) {
353 xer_ov = 0;
354 } else {
355 xer_ov = 1;
356 xer_so = 1;
357 }
358 if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
359 xer_ca = 0;
360 } else {
361 xer_ca = 1;
362 }
363 }
364
365 #if defined(TARGET_PPC64)
366 void do_subfzeo_64 (void)
367 {
368 T1 = T0;
369 T0 = ~T0 + xer_ca;
370 if (likely(!(((uint64_t)~T1 ^ UINT64_MAX) &
371 ((uint64_t)(~T1) ^ (uint64_t)T0) & (1ULL << 63)))) {
372 xer_ov = 0;
373 } else {
374 xer_ov = 1;
375 xer_so = 1;
376 }
377 if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
378 xer_ca = 0;
379 } else {
380 xer_ca = 1;
381 }
382 }
383 #endif
384
385 void do_cntlzw (void)
386 {
387 T0 = clz32(T0);
388 }
389
390 #if defined(TARGET_PPC64)
391 void do_cntlzd (void)
392 {
393 T0 = clz64(T0);
394 }
395 #endif
396
397 /* shift right arithmetic helper */
398 void do_sraw (void)
399 {
400 int32_t ret;
401
402 if (likely(!(T1 & 0x20UL))) {
403 if (likely((uint32_t)T1 != 0)) {
404 ret = (int32_t)T0 >> (T1 & 0x1fUL);
405 if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
406 xer_ca = 0;
407 } else {
408 xer_ca = 1;
409 }
410 } else {
411 ret = T0;
412 xer_ca = 0;
413 }
414 } else {
415 ret = (-1) * ((uint32_t)T0 >> 31);
416 if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) {
417 xer_ca = 0;
418 } else {
419 xer_ca = 1;
420 }
421 }
422 T0 = ret;
423 }
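
/* Illustrative note, for clarity only: the CA rule above means "carry is set
 * iff the source was negative and at least one 1 bit was shifted out".
 * Hand-worked example (values assumed): T0 = 0xFFFFFFF5 (-11), T1 = 2
 *   ret = -11 >> 2 = -3 (0xFFFFFFFD), shifted-out bits = 0b01 != 0
 *   => xer_ca = 1
 */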
424
425 #if defined(TARGET_PPC64)
426 void do_srad (void)
427 {
428 int64_t ret;
429
430 if (likely(!(T1 & 0x40UL))) {
431 if (likely((uint64_t)T1 != 0)) {
432 ret = (int64_t)T0 >> (T1 & 0x3FUL);
433             if (likely(ret >= 0 || ((int64_t)T0 & ((1ULL << T1) - 1)) == 0)) {
434 xer_ca = 0;
435 } else {
436 xer_ca = 1;
437 }
438 } else {
439 ret = T0;
440 xer_ca = 0;
441 }
442 } else {
443 ret = (-1) * ((uint64_t)T0 >> 63);
444 if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
445 xer_ca = 0;
446 } else {
447 xer_ca = 1;
448 }
449 }
450 T0 = ret;
451 }
452 #endif
453
454 void do_popcntb (void)
455 {
456 uint32_t ret;
457 int i;
458
459 ret = 0;
460 for (i = 0; i < 32; i += 8)
461 ret |= ctpop8((T0 >> i) & 0xFF) << i;
462 T0 = ret;
463 }
464
465 #if defined(TARGET_PPC64)
466 void do_popcntb_64 (void)
467 {
468 uint64_t ret;
469 int i;
470
471 ret = 0;
472 for (i = 0; i < 64; i += 8)
473 ret |= ctpop8((T0 >> i) & 0xFF) << i;
474 T0 = ret;
475 }
476 #endif
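
/* Illustrative note, for clarity only: popcntb keeps each byte independent,
 * storing the population count of every source byte in the corresponding
 * result byte. Hand-worked example (input assumed):
 *   T0 = 0x12345678  ->  0x02030404
 *   (0x12, 0x34, 0x56 and 0x78 have 2, 3, 4 and 4 set bits respectively)
 */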
477
478 /*****************************************************************************/
479 /* Floating point operations helpers */
480 static always_inline int fpisneg (float64 f)
481 {
482 union {
483 float64 f;
484 uint64_t u;
485 } u;
486
487 u.f = f;
488
489 return u.u >> 63 != 0;
490 }
491
492 static always_inline int isden (float64 f)
493 {
494 union {
495 float64 f;
496 uint64_t u;
497 } u;
498
499 u.f = f;
500
501 return ((u.u >> 52) & 0x7FF) == 0;
502 }
503
504 static always_inline int iszero (float64 f)
505 {
506 union {
507 float64 f;
508 uint64_t u;
509 } u;
510
511 u.f = f;
512
513 return (u.u & ~0x8000000000000000ULL) == 0;
514 }
515
516 static always_inline int isinfinity (float64 f)
517 {
518 union {
519 float64 f;
520 uint64_t u;
521 } u;
522
523 u.f = f;
524
525     return ((u.u >> 52) & 0x7FF) == 0x7FF &&
526 (u.u & 0x000FFFFFFFFFFFFFULL) == 0;
527 }
528
529 void do_compute_fprf (int set_fprf)
530 {
531 int isneg;
532
533 isneg = fpisneg(FT0);
534 if (unlikely(float64_is_nan(FT0))) {
535 if (float64_is_signaling_nan(FT0)) {
536 /* Signaling NaN: flags are undefined */
537 T0 = 0x00;
538 } else {
539 /* Quiet NaN */
540 T0 = 0x11;
541 }
542 } else if (unlikely(isinfinity(FT0))) {
543 /* +/- infinity */
544 if (isneg)
545 T0 = 0x09;
546 else
547 T0 = 0x05;
548 } else {
549 if (iszero(FT0)) {
550 /* +/- zero */
551 if (isneg)
552 T0 = 0x12;
553 else
554 T0 = 0x02;
555 } else {
556 if (isden(FT0)) {
557 /* Denormalized numbers */
558 T0 = 0x10;
559 } else {
560 /* Normalized numbers */
561 T0 = 0x00;
562 }
563 if (isneg) {
564 T0 |= 0x08;
565 } else {
566 T0 |= 0x04;
567 }
568 }
569 }
570 if (set_fprf) {
571 /* We update FPSCR_FPRF */
572 env->fpscr &= ~(0x1F << FPSCR_FPRF);
573 env->fpscr |= T0 << FPSCR_FPRF;
574 }
575 /* We just need fpcc to update Rc1 */
576 T0 &= 0xF;
577 }
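
/* Reference note, derived from do_compute_fprf above: the FPRF class codes
 * produced are
 *   0x11 quiet NaN        0x09 -infinity      0x05 +infinity
 *   0x12 -zero            0x02 +zero
 *   0x18 -denormalized    0x14 +denormalized
 *   0x08 -normalized      0x04 +normalized
 * and a signaling NaN leaves the flags undefined (0x00 here).
 */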
578
579 /* Floating-point invalid operations exception */
580 static always_inline void fload_invalid_op_excp (int op)
581 {
582 int ve;
583
584 ve = fpscr_ve;
585 if (op & POWERPC_EXCP_FP_VXSNAN) {
586 /* Operation on signaling NaN */
587 env->fpscr |= 1 << FPSCR_VXSNAN;
588 }
589 if (op & POWERPC_EXCP_FP_VXSOFT) {
590 /* Software-defined condition */
591 env->fpscr |= 1 << FPSCR_VXSOFT;
592 }
593 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
594 case POWERPC_EXCP_FP_VXISI:
595 /* Magnitude subtraction of infinities */
596 env->fpscr |= 1 << FPSCR_VXISI;
597 goto update_arith;
598 case POWERPC_EXCP_FP_VXIDI:
599 /* Division of infinity by infinity */
600 env->fpscr |= 1 << FPSCR_VXIDI;
601 goto update_arith;
602 case POWERPC_EXCP_FP_VXZDZ:
603 /* Division of zero by zero */
604 env->fpscr |= 1 << FPSCR_VXZDZ;
605 goto update_arith;
606 case POWERPC_EXCP_FP_VXIMZ:
607 /* Multiplication of zero by infinity */
608 env->fpscr |= 1 << FPSCR_VXIMZ;
609 goto update_arith;
610 case POWERPC_EXCP_FP_VXVC:
611 /* Ordered comparison of NaN */
612 env->fpscr |= 1 << FPSCR_VXVC;
613 env->fpscr &= ~(0xF << FPSCR_FPCC);
614 env->fpscr |= 0x11 << FPSCR_FPCC;
615 /* We must update the target FPR before raising the exception */
616 if (ve != 0) {
617 env->exception_index = POWERPC_EXCP_PROGRAM;
618 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
619 /* Update the floating-point enabled exception summary */
620 env->fpscr |= 1 << FPSCR_FEX;
621             /* Exception is deferred */
622 ve = 0;
623 }
624 break;
625 case POWERPC_EXCP_FP_VXSQRT:
626 /* Square root of a negative number */
627 env->fpscr |= 1 << FPSCR_VXSQRT;
628 update_arith:
629 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
630 if (ve == 0) {
631 /* Set the result to quiet NaN */
632 FT0 = (uint64_t)-1;
633 env->fpscr &= ~(0xF << FPSCR_FPCC);
634 env->fpscr |= 0x11 << FPSCR_FPCC;
635 }
636 break;
637 case POWERPC_EXCP_FP_VXCVI:
638 /* Invalid conversion */
639 env->fpscr |= 1 << FPSCR_VXCVI;
640 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
641 if (ve == 0) {
642 /* Set the result to quiet NaN */
643 FT0 = (uint64_t)-1;
644 env->fpscr &= ~(0xF << FPSCR_FPCC);
645 env->fpscr |= 0x11 << FPSCR_FPCC;
646 }
647 break;
648 }
649 /* Update the floating-point invalid operation summary */
650 env->fpscr |= 1 << FPSCR_VX;
651 /* Update the floating-point exception summary */
652 env->fpscr |= 1 << FPSCR_FX;
653 if (ve != 0) {
654 /* Update the floating-point enabled exception summary */
655 env->fpscr |= 1 << FPSCR_FEX;
656 if (msr_fe0 != 0 || msr_fe1 != 0)
657 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
658 }
659 }
660
661 static always_inline void float_zero_divide_excp (void)
662 {
663 union {
664 float64 f;
665 uint64_t u;
666 } u0, u1;
667
668 env->fpscr |= 1 << FPSCR_ZX;
669 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
670 /* Update the floating-point exception summary */
671 env->fpscr |= 1 << FPSCR_FX;
672 if (fpscr_ze != 0) {
673 /* Update the floating-point enabled exception summary */
674 env->fpscr |= 1 << FPSCR_FEX;
675 if (msr_fe0 != 0 || msr_fe1 != 0) {
676 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
677 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
678 }
679 } else {
680 /* Set the result to infinity */
681 u0.f = FT0;
682 u1.f = FT1;
683 u0.u = ((u0.u ^ u1.u) & 0x8000000000000000ULL);
684         u0.u |= 0x7FFULL << 52;
685 FT0 = u0.f;
686 }
687 }
688
689 static always_inline void float_overflow_excp (void)
690 {
691 env->fpscr |= 1 << FPSCR_OX;
692 /* Update the floating-point exception summary */
693 env->fpscr |= 1 << FPSCR_FX;
694 if (fpscr_oe != 0) {
695 /* XXX: should adjust the result */
696 /* Update the floating-point enabled exception summary */
697 env->fpscr |= 1 << FPSCR_FEX;
698 /* We must update the target FPR before raising the exception */
699 env->exception_index = POWERPC_EXCP_PROGRAM;
700 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
701 } else {
702 env->fpscr |= 1 << FPSCR_XX;
703 env->fpscr |= 1 << FPSCR_FI;
704 }
705 }
706
707 static always_inline void float_underflow_excp (void)
708 {
709 env->fpscr |= 1 << FPSCR_UX;
710 /* Update the floating-point exception summary */
711 env->fpscr |= 1 << FPSCR_FX;
712 if (fpscr_ue != 0) {
713 /* XXX: should adjust the result */
714 /* Update the floating-point enabled exception summary */
715 env->fpscr |= 1 << FPSCR_FEX;
716 /* We must update the target FPR before raising the exception */
717 env->exception_index = POWERPC_EXCP_PROGRAM;
718 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
719 }
720 }
721
722 static always_inline void float_inexact_excp (void)
723 {
724 env->fpscr |= 1 << FPSCR_XX;
725 /* Update the floating-point exception summary */
726 env->fpscr |= 1 << FPSCR_FX;
727 if (fpscr_xe != 0) {
728 /* Update the floating-point enabled exception summary */
729 env->fpscr |= 1 << FPSCR_FEX;
730 /* We must update the target FPR before raising the exception */
731 env->exception_index = POWERPC_EXCP_PROGRAM;
732 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
733 }
734 }
735
736 static always_inline void fpscr_set_rounding_mode (void)
737 {
738 int rnd_type;
739
740 /* Set rounding mode */
741 switch (fpscr_rn) {
742 case 0:
743 /* Best approximation (round to nearest) */
744 rnd_type = float_round_nearest_even;
745 break;
746 case 1:
747 /* Smaller magnitude (round toward zero) */
748 rnd_type = float_round_to_zero;
749 break;
750 case 2:
751         /* Round toward +infinity */
752 rnd_type = float_round_up;
753 break;
754 default:
755 case 3:
756         /* Round toward -infinity */
757 rnd_type = float_round_down;
758 break;
759 }
760 set_float_rounding_mode(rnd_type, &env->fp_status);
761 }
762
763 void do_fpscr_setbit (int bit)
764 {
765 int prev;
766
767 prev = (env->fpscr >> bit) & 1;
768 env->fpscr |= 1 << bit;
769 if (prev == 0) {
770 switch (bit) {
771 case FPSCR_VX:
772 env->fpscr |= 1 << FPSCR_FX;
773 if (fpscr_ve)
774                 goto raise_ve;
            break;
775 case FPSCR_OX:
776 env->fpscr |= 1 << FPSCR_FX;
777 if (fpscr_oe)
778 goto raise_oe;
779 break;
780 case FPSCR_UX:
781 env->fpscr |= 1 << FPSCR_FX;
782 if (fpscr_ue)
783 goto raise_ue;
784 break;
785 case FPSCR_ZX:
786 env->fpscr |= 1 << FPSCR_FX;
787 if (fpscr_ze)
788 goto raise_ze;
789 break;
790 case FPSCR_XX:
791 env->fpscr |= 1 << FPSCR_FX;
792 if (fpscr_xe)
793 goto raise_xe;
794 break;
795 case FPSCR_VXSNAN:
796 case FPSCR_VXISI:
797 case FPSCR_VXIDI:
798 case FPSCR_VXZDZ:
799 case FPSCR_VXIMZ:
800 case FPSCR_VXVC:
801 case FPSCR_VXSOFT:
802 case FPSCR_VXSQRT:
803 case FPSCR_VXCVI:
804 env->fpscr |= 1 << FPSCR_VX;
805 env->fpscr |= 1 << FPSCR_FX;
806 if (fpscr_ve != 0)
807 goto raise_ve;
808 break;
809 case FPSCR_VE:
810 if (fpscr_vx != 0) {
811 raise_ve:
812 env->error_code = POWERPC_EXCP_FP;
813 if (fpscr_vxsnan)
814 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
815 if (fpscr_vxisi)
816 env->error_code |= POWERPC_EXCP_FP_VXISI;
817 if (fpscr_vxidi)
818 env->error_code |= POWERPC_EXCP_FP_VXIDI;
819 if (fpscr_vxzdz)
820 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
821 if (fpscr_vximz)
822 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
823 if (fpscr_vxvc)
824 env->error_code |= POWERPC_EXCP_FP_VXVC;
825 if (fpscr_vxsoft)
826 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
827 if (fpscr_vxsqrt)
828 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
829 if (fpscr_vxcvi)
830 env->error_code |= POWERPC_EXCP_FP_VXCVI;
831 goto raise_excp;
832 }
833 break;
834 case FPSCR_OE:
835 if (fpscr_ox != 0) {
836 raise_oe:
837 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
838 goto raise_excp;
839 }
840 break;
841 case FPSCR_UE:
842 if (fpscr_ux != 0) {
843 raise_ue:
844 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
845 goto raise_excp;
846 }
847 break;
848 case FPSCR_ZE:
849 if (fpscr_zx != 0) {
850 raise_ze:
851 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
852 goto raise_excp;
853 }
854 break;
855 case FPSCR_XE:
856 if (fpscr_xx != 0) {
857 raise_xe:
858 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
859 goto raise_excp;
860 }
861 break;
862 case FPSCR_RN1:
863 case FPSCR_RN:
864 fpscr_set_rounding_mode();
865 break;
866 default:
867 break;
868 raise_excp:
869 /* Update the floating-point enabled exception summary */
870 env->fpscr |= 1 << FPSCR_FEX;
871 /* We have to update Rc1 before raising the exception */
872 env->exception_index = POWERPC_EXCP_PROGRAM;
873 break;
874 }
875 }
876 }
877
878 #if defined(WORDS_BIGENDIAN)
879 #define WORD0 0
880 #define WORD1 1
881 #else
882 #define WORD0 1
883 #define WORD1 0
884 #endif
885 void do_store_fpscr (uint32_t mask)
886 {
887 /*
888 * We use only the 32 LSB of the incoming fpr
889 */
890 union {
891 double d;
892 struct {
893 uint32_t u[2];
894 } s;
895 } u;
896 uint32_t prev, new;
897 int i;
898
899 u.d = FT0;
900 prev = env->fpscr;
901 new = u.s.u[WORD1];
902 new &= ~0x90000000;
903 new |= prev & 0x90000000;
904 for (i = 0; i < 7; i++) {
905 if (mask & (1 << i)) {
906 env->fpscr &= ~(0xF << (4 * i));
907 env->fpscr |= new & (0xF << (4 * i));
908 }
909 }
910 /* Update VX and FEX */
911 if (fpscr_ix != 0)
912 env->fpscr |= 1 << FPSCR_VX;
913 if ((fpscr_ex & fpscr_eex) != 0) {
914 env->fpscr |= 1 << FPSCR_FEX;
915 env->exception_index = POWERPC_EXCP_PROGRAM;
916 /* XXX: we should compute it properly */
917 env->error_code = POWERPC_EXCP_FP;
918 }
919 fpscr_set_rounding_mode();
920 }
921 #undef WORD0
922 #undef WORD1
923
924 #ifdef CONFIG_SOFTFLOAT
925 void do_float_check_status (void)
926 {
927 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
928 (env->error_code & POWERPC_EXCP_FP)) {
929         /* Deferred floating-point exception after target FPR update */
930 if (msr_fe0 != 0 || msr_fe1 != 0)
931 do_raise_exception_err(env->exception_index, env->error_code);
932 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
933 float_overflow_excp();
934 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
935 float_underflow_excp();
936 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
937 float_inexact_excp();
938 }
939 }
940 #endif
941
942 #if USE_PRECISE_EMULATION
943 void do_fadd (void)
944 {
945 if (unlikely(float64_is_signaling_nan(FT0) ||
946 float64_is_signaling_nan(FT1))) {
947 /* sNaN addition */
948 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
949 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
950 fpisneg(FT0) == fpisneg(FT1))) {
951 FT0 = float64_add(FT0, FT1, &env->fp_status);
952 } else {
953 /* Magnitude subtraction of infinities */
954 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
955 }
956 }
957
958 void do_fsub (void)
959 {
960 if (unlikely(float64_is_signaling_nan(FT0) ||
961 float64_is_signaling_nan(FT1))) {
962 /* sNaN subtraction */
963 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
964 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
965 fpisneg(FT0) != fpisneg(FT1))) {
966 FT0 = float64_sub(FT0, FT1, &env->fp_status);
967 } else {
968 /* Magnitude subtraction of infinities */
969 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
970 }
971 }
972
973 void do_fmul (void)
974 {
975 if (unlikely(float64_is_signaling_nan(FT0) ||
976 float64_is_signaling_nan(FT1))) {
977 /* sNaN multiplication */
978 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
979 } else if (unlikely((isinfinity(FT0) && iszero(FT1)) ||
980 (iszero(FT0) && isinfinity(FT1)))) {
981 /* Multiplication of zero by infinity */
982 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
983 } else {
984 FT0 = float64_mul(FT0, FT1, &env->fp_status);
985 }
986 }
987
988 void do_fdiv (void)
989 {
990 if (unlikely(float64_is_signaling_nan(FT0) ||
991 float64_is_signaling_nan(FT1))) {
992 /* sNaN division */
993 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
994 } else if (unlikely(isinfinity(FT0) && isinfinity(FT1))) {
995 /* Division of infinity by infinity */
996 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
997 } else if (unlikely(iszero(FT1))) {
998 if (iszero(FT0)) {
999 /* Division of zero by zero */
1000 fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1001 } else {
1002 /* Division by zero */
1003 float_zero_divide_excp();
1004 }
1005 } else {
1006 FT0 = float64_div(FT0, FT1, &env->fp_status);
1007 }
1008 }
1009 #endif /* USE_PRECISE_EMULATION */
1010
1011 void do_fctiw (void)
1012 {
1013 union {
1014 double d;
1015 uint64_t i;
1016 } p;
1017
1018 if (unlikely(float64_is_signaling_nan(FT0))) {
1019 /* sNaN conversion */
1020 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1021 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1022         /* qNaN / infinity conversion */
1023 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1024 } else {
1025 p.i = float64_to_int32(FT0, &env->fp_status);
1026 #if USE_PRECISE_EMULATION
1027 /* XXX: higher bits are not supposed to be significant.
1028 * to make tests easier, return the same as a real PowerPC 750
1029 */
1030 p.i |= 0xFFF80000ULL << 32;
1031 #endif
1032 FT0 = p.d;
1033 }
1034 }
1035
1036 void do_fctiwz (void)
1037 {
1038 union {
1039 double d;
1040 uint64_t i;
1041 } p;
1042
1043 if (unlikely(float64_is_signaling_nan(FT0))) {
1044 /* sNaN conversion */
1045 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1046 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1047         /* qNaN / infinity conversion */
1048 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1049 } else {
1050 p.i = float64_to_int32_round_to_zero(FT0, &env->fp_status);
1051 #if USE_PRECISE_EMULATION
1052 /* XXX: higher bits are not supposed to be significant.
1053 * to make tests easier, return the same as a real PowerPC 750
1054 */
1055 p.i |= 0xFFF80000ULL << 32;
1056 #endif
1057 FT0 = p.d;
1058 }
1059 }
1060
1061 #if defined(TARGET_PPC64)
1062 void do_fcfid (void)
1063 {
1064 union {
1065 double d;
1066 uint64_t i;
1067 } p;
1068
1069 p.d = FT0;
1070 FT0 = int64_to_float64(p.i, &env->fp_status);
1071 }
1072
1073 void do_fctid (void)
1074 {
1075 union {
1076 double d;
1077 uint64_t i;
1078 } p;
1079
1080 if (unlikely(float64_is_signaling_nan(FT0))) {
1081 /* sNaN conversion */
1082 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1083 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1084         /* qNaN / infinity conversion */
1085 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1086 } else {
1087 p.i = float64_to_int64(FT0, &env->fp_status);
1088 FT0 = p.d;
1089 }
1090 }
1091
1092 void do_fctidz (void)
1093 {
1094 union {
1095 double d;
1096 uint64_t i;
1097 } p;
1098
1099 if (unlikely(float64_is_signaling_nan(FT0))) {
1100 /* sNaN conversion */
1101 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1102 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1103 /* qNan / infinity conversion */
1104 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1105 } else {
1106 p.i = float64_to_int64_round_to_zero(FT0, &env->fp_status);
1107 FT0 = p.d;
1108 }
1109 }
1110
1111 #endif
1112
1113 static always_inline void do_fri (int rounding_mode)
1114 {
1115 if (unlikely(float64_is_signaling_nan(FT0))) {
1116 /* sNaN round */
1117 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1118 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1119         /* qNaN / infinity round */
1120 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1121 } else {
1122 set_float_rounding_mode(rounding_mode, &env->fp_status);
1123 FT0 = float64_round_to_int(FT0, &env->fp_status);
1124 /* Restore rounding mode from FPSCR */
1125 fpscr_set_rounding_mode();
1126 }
1127 }
1128
1129 void do_frin (void)
1130 {
1131 do_fri(float_round_nearest_even);
1132 }
1133
1134 void do_friz (void)
1135 {
1136 do_fri(float_round_to_zero);
1137 }
1138
1139 void do_frip (void)
1140 {
1141 do_fri(float_round_up);
1142 }
1143
1144 void do_frim (void)
1145 {
1146 do_fri(float_round_down);
1147 }
1148
1149 #if USE_PRECISE_EMULATION
1150 void do_fmadd (void)
1151 {
1152 if (unlikely(float64_is_signaling_nan(FT0) ||
1153 float64_is_signaling_nan(FT1) ||
1154 float64_is_signaling_nan(FT2))) {
1155 /* sNaN operation */
1156 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1157 } else {
1158 #ifdef FLOAT128
1159 /* This is the way the PowerPC specification defines it */
1160 float128 ft0_128, ft1_128;
1161
1162 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1163 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1164 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1165 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1166 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1167 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1168 #else
1169 /* This is OK on x86 hosts */
1170 FT0 = (FT0 * FT1) + FT2;
1171 #endif
1172 }
1173 }
1174
1175 void do_fmsub (void)
1176 {
1177 if (unlikely(float64_is_signaling_nan(FT0) ||
1178 float64_is_signaling_nan(FT1) ||
1179 float64_is_signaling_nan(FT2))) {
1180 /* sNaN operation */
1181 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1182 } else {
1183 #ifdef FLOAT128
1184 /* This is the way the PowerPC specification defines it */
1185 float128 ft0_128, ft1_128;
1186
1187 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1188 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1189 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1190 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1191 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1192 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1193 #else
1194 /* This is OK on x86 hosts */
1195 FT0 = (FT0 * FT1) - FT2;
1196 #endif
1197 }
1198 }
1199 #endif /* USE_PRECISE_EMULATION */
1200
1201 void do_fnmadd (void)
1202 {
1203 if (unlikely(float64_is_signaling_nan(FT0) ||
1204 float64_is_signaling_nan(FT1) ||
1205 float64_is_signaling_nan(FT2))) {
1206 /* sNaN operation */
1207 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1208 } else {
1209 #if USE_PRECISE_EMULATION
1210 #ifdef FLOAT128
1211 /* This is the way the PowerPC specification defines it */
1212 float128 ft0_128, ft1_128;
1213
1214 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1215 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1216 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1217 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1218 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1219 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1220 #else
1221 /* This is OK on x86 hosts */
1222 FT0 = (FT0 * FT1) + FT2;
1223 #endif
1224 #else
1225 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1226 FT0 = float64_add(FT0, FT2, &env->fp_status);
1227 #endif
1228 if (likely(!isnan(FT0)))
1229 FT0 = float64_chs(FT0);
1230 }
1231 }
1232
1233 void do_fnmsub (void)
1234 {
1235 if (unlikely(float64_is_signaling_nan(FT0) ||
1236 float64_is_signaling_nan(FT1) ||
1237 float64_is_signaling_nan(FT2))) {
1238 /* sNaN operation */
1239 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1240 } else {
1241 #if USE_PRECISE_EMULATION
1242 #ifdef FLOAT128
1243 /* This is the way the PowerPC specification defines it */
1244 float128 ft0_128, ft1_128;
1245
1246 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1247 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1248 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1249 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1250 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1251 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1252 #else
1253 /* This is OK on x86 hosts */
1254 FT0 = (FT0 * FT1) - FT2;
1255 #endif
1256 #else
1257 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1258 FT0 = float64_sub(FT0, FT2, &env->fp_status);
1259 #endif
1260 if (likely(!isnan(FT0)))
1261 FT0 = float64_chs(FT0);
1262 }
1263 }
1264
1265 #if USE_PRECISE_EMULATION
1266 void do_frsp (void)
1267 {
1268 if (unlikely(float64_is_signaling_nan(FT0))) {
1269         /* sNaN round to single precision */
1270 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1271 } else {
1272 FT0 = float64_to_float32(FT0, &env->fp_status);
1273 }
1274 }
1275 #endif /* USE_PRECISE_EMULATION */
1276
1277 void do_fsqrt (void)
1278 {
1279 if (unlikely(float64_is_signaling_nan(FT0))) {
1280 /* sNaN square root */
1281 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1282 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1283 /* Square root of a negative nonzero number */
1284 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1285 } else {
1286 FT0 = float64_sqrt(FT0, &env->fp_status);
1287 }
1288 }
1289
1290 void do_fre (void)
1291 {
1292 union {
1293 double d;
1294 uint64_t i;
1295 } p;
1296
1297 if (unlikely(float64_is_signaling_nan(FT0))) {
1298 /* sNaN reciprocal */
1299 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1300 } else if (unlikely(iszero(FT0))) {
1301 /* Zero reciprocal */
1302 float_zero_divide_excp();
1303 } else if (likely(isnormal(FT0))) {
1304 FT0 = float64_div(1.0, FT0, &env->fp_status);
1305 } else {
1306 p.d = FT0;
1307 if (p.i == 0x8000000000000000ULL) {
1308 p.i = 0xFFF0000000000000ULL;
1309 } else if (p.i == 0x0000000000000000ULL) {
1310 p.i = 0x7FF0000000000000ULL;
1311 } else if (isnan(FT0)) {
1312 p.i = 0x7FF8000000000000ULL;
1313 } else if (fpisneg(FT0)) {
1314 p.i = 0x8000000000000000ULL;
1315 } else {
1316 p.i = 0x0000000000000000ULL;
1317 }
1318 FT0 = p.d;
1319 }
1320 }
1321
1322 void do_fres (void)
1323 {
1324 union {
1325 double d;
1326 uint64_t i;
1327 } p;
1328
1329 if (unlikely(float64_is_signaling_nan(FT0))) {
1330 /* sNaN reciprocal */
1331 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1332 } else if (unlikely(iszero(FT0))) {
1333 /* Zero reciprocal */
1334 float_zero_divide_excp();
1335 } else if (likely(isnormal(FT0))) {
1336 #if USE_PRECISE_EMULATION
1337 FT0 = float64_div(1.0, FT0, &env->fp_status);
1338 FT0 = float64_to_float32(FT0, &env->fp_status);
1339 #else
1340 FT0 = float32_div(1.0, FT0, &env->fp_status);
1341 #endif
1342 } else {
1343 p.d = FT0;
1344 if (p.i == 0x8000000000000000ULL) {
1345 p.i = 0xFFF0000000000000ULL;
1346 } else if (p.i == 0x0000000000000000ULL) {
1347 p.i = 0x7FF0000000000000ULL;
1348 } else if (isnan(FT0)) {
1349 p.i = 0x7FF8000000000000ULL;
1350 } else if (fpisneg(FT0)) {
1351 p.i = 0x8000000000000000ULL;
1352 } else {
1353 p.i = 0x0000000000000000ULL;
1354 }
1355 FT0 = p.d;
1356 }
1357 }
1358
1359 void do_frsqrte (void)
1360 {
1361 union {
1362 double d;
1363 uint64_t i;
1364 } p;
1365
1366 if (unlikely(float64_is_signaling_nan(FT0))) {
1367 /* sNaN reciprocal square root */
1368 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1369 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1370 /* Reciprocal square root of a negative nonzero number */
1371 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1372 } else if (likely(isnormal(FT0))) {
1373 FT0 = float64_sqrt(FT0, &env->fp_status);
1374 FT0 = float32_div(1.0, FT0, &env->fp_status);
1375 } else {
1376 p.d = FT0;
1377 if (p.i == 0x8000000000000000ULL) {
1378 p.i = 0xFFF0000000000000ULL;
1379 } else if (p.i == 0x0000000000000000ULL) {
1380 p.i = 0x7FF0000000000000ULL;
1381 } else if (isnan(FT0)) {
1382 p.i |= 0x000FFFFFFFFFFFFFULL;
1383 } else if (fpisneg(FT0)) {
1384 p.i = 0x7FF8000000000000ULL;
1385 } else {
1386 p.i = 0x0000000000000000ULL;
1387 }
1388 FT0 = p.d;
1389 }
1390 }
1391
1392 void do_fsel (void)
1393 {
1394 if (!fpisneg(FT0) || iszero(FT0))
1395 FT0 = FT1;
1396 else
1397 FT0 = FT2;
1398 }
1399
1400 void do_fcmpu (void)
1401 {
1402 if (unlikely(float64_is_signaling_nan(FT0) ||
1403 float64_is_signaling_nan(FT1))) {
1404 /* sNaN comparison */
1405 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1406 } else {
1407 if (float64_lt(FT0, FT1, &env->fp_status)) {
1408 T0 = 0x08UL;
1409 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1410 T0 = 0x04UL;
1411 } else {
1412 T0 = 0x02UL;
1413 }
1414 }
1415 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1416 env->fpscr |= T0 << FPSCR_FPRF;
1417 }
1418
1419 void do_fcmpo (void)
1420 {
1421 if (unlikely(float64_is_nan(FT0) ||
1422 float64_is_nan(FT1))) {
1423 if (float64_is_signaling_nan(FT0) ||
1424 float64_is_signaling_nan(FT1)) {
1425 /* sNaN comparison */
1426 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1427 POWERPC_EXCP_FP_VXVC);
1428 } else {
1429 /* qNaN comparison */
1430 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1431 }
1432 } else {
1433 if (float64_lt(FT0, FT1, &env->fp_status)) {
1434 T0 = 0x08UL;
1435 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1436 T0 = 0x04UL;
1437 } else {
1438 T0 = 0x02UL;
1439 }
1440 }
1441 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1442 env->fpscr |= T0 << FPSCR_FPRF;
1443 }
1444
1445 #if !defined (CONFIG_USER_ONLY)
1446 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1447
1448 void do_store_msr (void)
1449 {
1450 T0 = hreg_store_msr(env, T0);
1451 if (T0 != 0) {
1452 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1453 do_raise_exception(T0);
1454 }
1455 }
1456
1457 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1458 target_ulong msrm, int keep_msrh)
1459 {
1460 #if defined(TARGET_PPC64)
1461 if (msr & (1ULL << MSR_SF)) {
1462 nip = (uint64_t)nip;
1463 msr &= (uint64_t)msrm;
1464 } else {
1465 nip = (uint32_t)nip;
1466 msr = (uint32_t)(msr & msrm);
1467 if (keep_msrh)
1468 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1469 }
1470 #else
1471 nip = (uint32_t)nip;
1472 msr &= (uint32_t)msrm;
1473 #endif
1474 /* XXX: beware: this is false if VLE is supported */
1475 env->nip = nip & ~((target_ulong)0x00000003);
1476 hreg_store_msr(env, msr);
1477 #if defined (DEBUG_OP)
1478 cpu_dump_rfi(env->nip, env->msr);
1479 #endif
1480 /* No need to raise an exception here,
1481 * as rfi is always the last insn of a TB
1482 */
1483 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1484 }
1485
1486 void do_rfi (void)
1487 {
1488 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1489 ~((target_ulong)0xFFFF0000), 1);
1490 }
1491
1492 #if defined(TARGET_PPC64)
1493 void do_rfid (void)
1494 {
1495 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1496 ~((target_ulong)0xFFFF0000), 0);
1497 }
1498 #endif
1499 #if defined(TARGET_PPC64H)
1500 void do_hrfid (void)
1501 {
1502 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1503 ~((target_ulong)0xFFFF0000), 0);
1504 }
1505 #endif
1506 #endif
1507
1508 void do_tw (int flags)
1509 {
1510 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
1511 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
1512 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
1513 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
1514 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
1515 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1516 }
1517 }
1518
1519 #if defined(TARGET_PPC64)
1520 void do_td (int flags)
1521 {
1522 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
1523 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
1524 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
1525 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
1526 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
1527 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1528 }
1529 #endif
1530
1531 /*****************************************************************************/
1532 /* PowerPC 601 specific instructions (POWER bridge) */
1533 void do_POWER_abso (void)
1534 {
1535 if ((uint32_t)T0 == INT32_MIN) {
1536 T0 = INT32_MAX;
1537 xer_ov = 1;
1538 xer_so = 1;
1539 } else {
1540 T0 = -T0;
1541 xer_ov = 0;
1542 }
1543 }
1544
1545 void do_POWER_clcs (void)
1546 {
1547 switch (T0) {
1548 case 0x0CUL:
1549 /* Instruction cache line size */
1550 T0 = env->icache_line_size;
1551 break;
1552 case 0x0DUL:
1553 /* Data cache line size */
1554 T0 = env->dcache_line_size;
1555 break;
1556 case 0x0EUL:
1557 /* Minimum cache line size */
1558 T0 = env->icache_line_size < env->dcache_line_size ?
1559 env->icache_line_size : env->dcache_line_size;
1560 break;
1561 case 0x0FUL:
1562 /* Maximum cache line size */
1563 T0 = env->icache_line_size > env->dcache_line_size ?
1564 env->icache_line_size : env->dcache_line_size;
1565 break;
1566 default:
1567 /* Undefined */
1568 break;
1569 }
1570 }
1571
1572 void do_POWER_div (void)
1573 {
1574 uint64_t tmp;
1575
1576 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1577 T0 = (long)((-1) * (T0 >> 31));
1578 env->spr[SPR_MQ] = 0;
1579 } else {
1580 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1581 env->spr[SPR_MQ] = tmp % T1;
1582 T0 = tmp / (int32_t)T1;
1583 }
1584 }
1585
1586 void do_POWER_divo (void)
1587 {
1588 int64_t tmp;
1589
1590 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1591 T0 = (long)((-1) * (T0 >> 31));
1592 env->spr[SPR_MQ] = 0;
1593 xer_ov = 1;
1594 xer_so = 1;
1595 } else {
1596 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1597 env->spr[SPR_MQ] = tmp % T1;
1598 tmp /= (int32_t)T1;
1599 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1600 xer_ov = 1;
1601 xer_so = 1;
1602 } else {
1603 xer_ov = 0;
1604 }
1605 T0 = tmp;
1606 }
1607 }
1608
1609 void do_POWER_divs (void)
1610 {
1611 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1612 T0 = (long)((-1) * (T0 >> 31));
1613 env->spr[SPR_MQ] = 0;
1614 } else {
1615 env->spr[SPR_MQ] = T0 % T1;
1616 T0 = (int32_t)T0 / (int32_t)T1;
1617 }
1618 }
1619
1620 void do_POWER_divso (void)
1621 {
1622 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == -1) || (int32_t)T1 == 0) {
1623 T0 = (long)((-1) * (T0 >> 31));
1624 env->spr[SPR_MQ] = 0;
1625 xer_ov = 1;
1626 xer_so = 1;
1627 } else {
1628 T0 = (int32_t)T0 / (int32_t)T1;
1629 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1630 xer_ov = 0;
1631 }
1632 }
1633
1634 void do_POWER_dozo (void)
1635 {
1636 if ((int32_t)T1 > (int32_t)T0) {
1637 T2 = T0;
1638 T0 = T1 - T0;
1639 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1640 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1641 xer_ov = 1;
1642 xer_so = 1;
1643 } else {
1644 xer_ov = 0;
1645 }
1646 } else {
1647 T0 = 0;
1648 xer_ov = 0;
1649 }
1650 }
1651
1652 void do_POWER_maskg (void)
1653 {
1654 uint32_t ret;
1655
1656 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1657 ret = -1;
1658 } else {
1659 ret = (((uint32_t)(-1)) >> ((uint32_t)T0)) ^
1660 (((uint32_t)(-1) >> ((uint32_t)T1)) >> 1);
1661 if ((uint32_t)T0 > (uint32_t)T1)
1662 ret = ~ret;
1663 }
1664 T0 = ret;
1665 }
1666
1667 void do_POWER_mulo (void)
1668 {
1669 uint64_t tmp;
1670
1671 tmp = (uint64_t)T0 * (uint64_t)T1;
1672 env->spr[SPR_MQ] = tmp >> 32;
1673 T0 = tmp;
1674 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1675 xer_ov = 1;
1676 xer_so = 1;
1677 } else {
1678 xer_ov = 0;
1679 }
1680 }
1681
1682 #if !defined (CONFIG_USER_ONLY)
1683 void do_POWER_rac (void)
1684 {
1685 #if 0
1686 mmu_ctx_t ctx;
1687
1688 /* We don't have to generate many instances of this instruction,
1689 * as rac is supervisor only.
1690 */
1691 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT, 1) == 0)
1692 T0 = ctx.raddr;
1693 #endif
1694 }
1695
1696 void do_POWER_rfsvc (void)
1697 {
1698 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1699 }
1700
1701 /* PowerPC 601 BAT management helper */
1702 void do_store_601_batu (int nr)
1703 {
1704 do_store_ibatu(env, nr, (uint32_t)T0);
1705 env->DBAT[0][nr] = env->IBAT[0][nr];
1706 env->DBAT[1][nr] = env->IBAT[1][nr];
1707 }
1708 #endif
1709
1710 /*****************************************************************************/
1711 /* 602 specific instructions */
1712 /* mfrom is the craziest instruction ever seen, imho! */
1713 /* Real implementation uses a ROM table. Do the same */
1714 #define USE_MFROM_ROM_TABLE
1715 void do_op_602_mfrom (void)
1716 {
1717 if (likely(T0 < 602)) {
1718 #if defined(USE_MFROM_ROM_TABLE)
1719 #include "mfrom_table.c"
1720 T0 = mfrom_ROM_table[T0];
1721 #else
1722 double d;
1723         /* Extremely decomposed:
1724          *
1725          *  T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
1726          */
1727 d = T0;
1728 d = float64_div(d, 256, &env->fp_status);
1729 d = float64_chs(d);
1730 d = exp10(d); // XXX: use float emulation function
1731 d = float64_add(d, 1.0, &env->fp_status);
1732 d = log10(d); // XXX: use float emulation function
1733 d = float64_mul(d, 256, &env->fp_status);
1734 d = float64_add(d, 0.5, &env->fp_status);
1735 T0 = float64_round_to_int(d, &env->fp_status);
1736 #endif
1737 } else {
1738 T0 = 0;
1739 }
1740 }
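
/* Illustrative note, for clarity only: in closed form the decomposition above
 * computes
 *   T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
 * e.g. for T0 = 0 this is 256 * log10(2) + 0.5 ~= 77.6, which rounds to 78
 * under the default round-to-nearest mode (value worked by hand, assumed).
 */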
1741
1742 /*****************************************************************************/
1743 /* Embedded PowerPC specific helpers */
1744 void do_405_check_ov (void)
1745 {
1746 if (likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1747 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1748 xer_ov = 0;
1749 } else {
1750 xer_ov = 1;
1751 xer_so = 1;
1752 }
1753 }
1754
1755 void do_405_check_sat (void)
1756 {
1757 if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1758 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1759 /* Saturate result */
1760 if (T2 >> 31) {
1761 T0 = INT32_MIN;
1762 } else {
1763 T0 = INT32_MAX;
1764 }
1765 }
1766 }
1767
1768 /* XXX: to be improved to check access rights when in user-mode */
1769 void do_load_dcr (void)
1770 {
1771 target_ulong val;
1772
1773 if (unlikely(env->dcr_env == NULL)) {
1774 if (loglevel != 0) {
1775 fprintf(logfile, "No DCR environment\n");
1776 }
1777 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1778 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1779 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1780 if (loglevel != 0) {
1781 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1782 }
1783 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1784 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1785 } else {
1786 T0 = val;
1787 }
1788 }
1789
1790 void do_store_dcr (void)
1791 {
1792 if (unlikely(env->dcr_env == NULL)) {
1793 if (loglevel != 0) {
1794 fprintf(logfile, "No DCR environment\n");
1795 }
1796 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1797 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1798 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1799 if (loglevel != 0) {
1800 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1801 }
1802 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1803 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1804 }
1805 }
1806
1807 #if !defined(CONFIG_USER_ONLY)
1808 void do_40x_rfci (void)
1809 {
1810 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1811 ~((target_ulong)0xFFFF0000), 0);
1812 }
1813
1814 void do_rfci (void)
1815 {
1816     __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1817 ~((target_ulong)0x3FFF0000), 0);
1818 }
1819
1820 void do_rfdi (void)
1821 {
1822     __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1823 ~((target_ulong)0x3FFF0000), 0);
1824 }
1825
1826 void do_rfmci (void)
1827 {
1828     __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1829 ~((target_ulong)0x3FFF0000), 0);
1830 }
1831
1832 void do_load_403_pb (int num)
1833 {
1834 T0 = env->pb[num];
1835 }
1836
1837 void do_store_403_pb (int num)
1838 {
1839 if (likely(env->pb[num] != T0)) {
1840 env->pb[num] = T0;
1841 /* Should be optimized */
1842 tlb_flush(env, 1);
1843 }
1844 }
1845 #endif
1846
1847 /* 440 specific */
1848 void do_440_dlmzb (void)
1849 {
1850 target_ulong mask;
1851 int i;
1852
1853 i = 1;
1854 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1855 if ((T0 & mask) == 0)
1856 goto done;
1857 i++;
1858 }
1859 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1860 if ((T1 & mask) == 0)
1861 break;
1862 i++;
1863 }
1864 done:
1865 T0 = i;
1866 }
1867
1868 #if defined(TARGET_PPCEMB)
1869 /* SPE extension helpers */
1870 /* Use a table to make this quicker */
1871 static uint8_t hbrev[16] = {
1872 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1873 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1874 };
1875
1876 static always_inline uint8_t byte_reverse (uint8_t val)
1877 {
1878 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1879 }
1880
1881 static always_inline uint32_t word_reverse (uint32_t val)
1882 {
1883 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1884 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1885 }
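
/* Illustrative sketch, for clarity only (hypothetical helper, not used
 * elsewhere): hbrev reverses the bit order within a nibble, so byte_reverse()
 * reverses a whole byte, e.g.
 *   byte_reverse(0x2C) = hbrev[0x2] | (hbrev[0xC] << 4) = 0x04 | 0x30 = 0x34
 * (0b00101100 reversed is 0b00110100). A plain-loop equivalent for
 * cross-checking:
 */
static always_inline uint8_t example_bit_reverse8 (uint8_t val)
{
    uint8_t ret = 0;
    int i;

    /* place bit i of val at position 7 - i of ret */
    for (i = 0; i < 8; i++)
        ret = (ret << 1) | ((val >> i) & 1);

    return ret;
}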
1886
1887 #define MASKBITS 16 // Random value - to be fixed
1888 void do_brinc (void)
1889 {
1890 uint32_t a, b, d, mask;
1891
1892 mask = (uint32_t)(-1UL) >> MASKBITS;
1893 b = T1_64 & mask;
1894 a = T0_64 & mask;
1895 d = word_reverse(1 + word_reverse(a | ~mask));
1896 T0_64 = (T0_64 & ~mask) | (d & mask);
1897 }
1898
1899 #define DO_SPE_OP2(name) \
1900 void do_ev##name (void) \
1901 { \
1902 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1903 (uint64_t)_do_e##name(T0_64, T1_64); \
1904 }
1905
1906 #define DO_SPE_OP1(name) \
1907 void do_ev##name (void) \
1908 { \
1909 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1910 (uint64_t)_do_e##name(T0_64); \
1911 }
1912
1913 /* Fixed-point vector arithmetic */
1914 static always_inline uint32_t _do_eabs (uint32_t val)
1915 {
1916 if (val != 0x80000000)
1917 val &= ~0x80000000;
1918
1919 return val;
1920 }
1921
1922 static always_inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
1923 {
1924 return op1 + op2;
1925 }
1926
1927 static always_inline int _do_ecntlsw (uint32_t val)
1928 {
1929 if (val & 0x80000000)
1930 return clz32(~val);
1931 else
1932 return clz32(val);
1933 }
1934
1935 static always_inline int _do_ecntlzw (uint32_t val)
1936 {
1937 return clz32(val);
1938 }
1939
1940 static always_inline uint32_t _do_eneg (uint32_t val)
1941 {
1942 if (val != 0x80000000)
1943 val ^= 0x80000000;
1944
1945 return val;
1946 }
1947
1948 static always_inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
1949 {
1950 return rotl32(op1, op2);
1951 }
1952
1953 static always_inline uint32_t _do_erndw (uint32_t val)
1954 {
1955     return (val + 0x00008000) & 0xFFFF0000;
1956 }
1957
1958 static always_inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
1959 {
1960 /* No error here: 6 bits are used */
1961 return op1 << (op2 & 0x3F);
1962 }
1963
1964 static always_inline int32_t _do_esrws (int32_t op1, uint32_t op2)
1965 {
1966 /* No error here: 6 bits are used */
1967 return op1 >> (op2 & 0x3F);
1968 }
1969
1970 static always_inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
1971 {
1972 /* No error here: 6 bits are used */
1973 return op1 >> (op2 & 0x3F);
1974 }
1975
1976 static always_inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
1977 {
1978 return op2 - op1;
1979 }
1980
1981 /* evabs */
1982 DO_SPE_OP1(abs);
1983 /* evaddw */
1984 DO_SPE_OP2(addw);
1985 /* evcntlsw */
1986 DO_SPE_OP1(cntlsw);
1987 /* evcntlzw */
1988 DO_SPE_OP1(cntlzw);
1989 /* evneg */
1990 DO_SPE_OP1(neg);
1991 /* evrlw */
1992 DO_SPE_OP2(rlw);
1993 /* evrnd */
1994 DO_SPE_OP1(rndw);
1995 /* evslw */
1996 DO_SPE_OP2(slw);
1997 /* evsrws */
1998 DO_SPE_OP2(srws);
1999 /* evsrwu */
2000 DO_SPE_OP2(srwu);
2001 /* evsubfw */
2002 DO_SPE_OP2(subfw);
2003
2004 /* evsel is a little bit more complicated... */
2005 static always_inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
2006 {
2007 if (n)
2008 return op1;
2009 else
2010 return op2;
2011 }
2012
2013 void do_evsel (void)
2014 {
2015 T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
2016 (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
2017 }
2018
2019 /* Fixed-point vector comparisons */
2020 #define DO_SPE_CMP(name) \
2021 void do_ev##name (void) \
2022 { \
2023 T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \
2024 T1_64 >> 32) << 32, \
2025 _do_e##name(T0_64, T1_64)); \
2026 }
2027
2028 static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
2029 {
2030 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2031 }
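
/* Illustrative note, for clarity only: the merged CR field above is laid out
 * as (hi << 3) | (lo << 2) | ((hi | lo) << 1) | (hi & lo), so a comparison
 * that is true on the high word only (t0 = 1, t1 = 0) yields 0b1010.
 */
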
2032 static always_inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
2033 {
2034 return op1 == op2 ? 1 : 0;
2035 }
2036
2037 static always_inline int _do_ecmpgts (int32_t op1, int32_t op2)
2038 {
2039 return op1 > op2 ? 1 : 0;
2040 }
2041
2042 static always_inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
2043 {
2044 return op1 > op2 ? 1 : 0;
2045 }
2046
2047 static always_inline int _do_ecmplts (int32_t op1, int32_t op2)
2048 {
2049 return op1 < op2 ? 1 : 0;
2050 }
2051
2052 static always_inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
2053 {
2054 return op1 < op2 ? 1 : 0;
2055 }
2056
2057 /* evcmpeq */
2058 DO_SPE_CMP(cmpeq);
2059 /* evcmpgts */
2060 DO_SPE_CMP(cmpgts);
2061 /* evcmpgtu */
2062 DO_SPE_CMP(cmpgtu);
2063 /* evcmplts */
2064 DO_SPE_CMP(cmplts);
2065 /* evcmpltu */
2066 DO_SPE_CMP(cmpltu);
2067
2068 /* Single precision floating-point conversions from/to integer */
2069 static always_inline uint32_t _do_efscfsi (int32_t val)
2070 {
2071 union {
2072 uint32_t u;
2073 float32 f;
2074 } u;
2075
2076 u.f = int32_to_float32(val, &env->spe_status);
2077
2078 return u.u;
2079 }
2080
2081 static always_inline uint32_t _do_efscfui (uint32_t val)
2082 {
2083 union {
2084 uint32_t u;
2085 float32 f;
2086 } u;
2087
2088 u.f = uint32_to_float32(val, &env->spe_status);
2089
2090 return u.u;
2091 }
2092
2093 static always_inline int32_t _do_efsctsi (uint32_t val)
2094 {
2095 union {
2096 int32_t u;
2097 float32 f;
2098 } u;
2099
2100 u.u = val;
2101     /* NaNs are not treated the way IEEE 754 specifies */
2102 if (unlikely(isnan(u.f)))
2103 return 0;
2104
2105 return float32_to_int32(u.f, &env->spe_status);
2106 }
2107
2108 static always_inline uint32_t _do_efsctui (uint32_t val)
2109 {
2110 union {
2111 int32_t u;
2112 float32 f;
2113 } u;
2114
2115 u.u = val;
2116     /* NaNs are not treated the way IEEE 754 specifies */
2117 if (unlikely(isnan(u.f)))
2118 return 0;
2119
2120 return float32_to_uint32(u.f, &env->spe_status);
2121 }
2122
2123 static always_inline int32_t _do_efsctsiz (uint32_t val)
2124 {
2125 union {
2126 int32_t u;
2127 float32 f;
2128 } u;
2129
2130 u.u = val;
2131     /* NaNs are not treated the way IEEE 754 specifies */
2132 if (unlikely(isnan(u.f)))
2133 return 0;
2134
2135 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2136 }
2137
2138 static always_inline uint32_t _do_efsctuiz (uint32_t val)
2139 {
2140 union {
2141 int32_t u;
2142 float32 f;
2143 } u;
2144
2145 u.u = val;
2146     /* NaNs are not treated the way IEEE 754 specifies */
2147 if (unlikely(isnan(u.f)))
2148 return 0;
2149
2150 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2151 }
2152
2153 void do_efscfsi (void)
2154 {
2155 T0_64 = _do_efscfsi(T0_64);
2156 }
2157
2158 void do_efscfui (void)
2159 {
2160 T0_64 = _do_efscfui(T0_64);
2161 }
2162
2163 void do_efsctsi (void)
2164 {
2165 T0_64 = _do_efsctsi(T0_64);
2166 }
2167
2168 void do_efsctui (void)
2169 {
2170 T0_64 = _do_efsctui(T0_64);
2171 }
2172
2173 void do_efsctsiz (void)
2174 {
2175 T0_64 = _do_efsctsiz(T0_64);
2176 }
2177
2178 void do_efsctuiz (void)
2179 {
2180 T0_64 = _do_efsctuiz(T0_64);
2181 }
2182
2183 /* Single precision floating-point conversion to/from fractional */
2184 static always_inline uint32_t _do_efscfsf (uint32_t val)
2185 {
2186 union {
2187 uint32_t u;
2188 float32 f;
2189 } u;
2190 float32 tmp;
2191
2192 u.f = int32_to_float32(val, &env->spe_status);
2193 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2194 u.f = float32_div(u.f, tmp, &env->spe_status);
2195
2196 return u.u;
2197 }
2198
2199 static always_inline uint32_t _do_efscfuf (uint32_t val)
2200 {
2201 union {
2202 uint32_t u;
2203 float32 f;
2204 } u;
2205 float32 tmp;
2206
2207 u.f = uint32_to_float32(val, &env->spe_status);
2208 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2209 u.f = float32_div(u.f, tmp, &env->spe_status);
2210
2211 return u.u;
2212 }
2213
2214 static always_inline int32_t _do_efsctsf (uint32_t val)
2215 {
2216 union {
2217 int32_t u;
2218 float32 f;
2219 } u;
2220 float32 tmp;
2221
2222 u.u = val;
2223     /* NaNs are not treated the way IEEE 754 specifies */
2224 if (unlikely(isnan(u.f)))
2225 return 0;
2226 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2227 u.f = float32_mul(u.f, tmp, &env->spe_status);
2228
2229 return float32_to_int32(u.f, &env->spe_status);
2230 }
2231
2232 static always_inline uint32_t _do_efsctuf (uint32_t val)
2233 {
2234 union {
2235 int32_t u;
2236 float32 f;
2237 } u;
2238 float32 tmp;
2239
2240 u.u = val;
2241     /* NaNs are not treated the way IEEE 754 specifies */
2242 if (unlikely(isnan(u.f)))
2243 return 0;
2244 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2245 u.f = float32_mul(u.f, tmp, &env->spe_status);
2246
2247 return float32_to_uint32(u.f, &env->spe_status);
2248 }
2249
2250 static always_inline int32_t _do_efsctsfz (uint32_t val)
2251 {
2252 union {
2253 int32_t u;
2254 float32 f;
2255 } u;
2256 float32 tmp;
2257
2258 u.u = val;
2259     /* NaNs are not treated the way IEEE 754 specifies */
2260 if (unlikely(isnan(u.f)))
2261 return 0;
2262 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2263 u.f = float32_mul(u.f, tmp, &env->spe_status);
2264
2265 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2266 }
2267
2268 static always_inline uint32_t _do_efsctufz (uint32_t val)
2269 {
2270 union {
2271 int32_t u;
2272 float32 f;
2273 } u;
2274 float32 tmp;
2275
2276 u.u = val;
2277     /* NaNs are not treated the way IEEE 754 specifies */
2278 if (unlikely(isnan(u.f)))
2279 return 0;
2280 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2281 u.f = float32_mul(u.f, tmp, &env->spe_status);
2282
2283 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2284 }
2285
2286 void do_efscfsf (void)
2287 {
2288 T0_64 = _do_efscfsf(T0_64);
2289 }
2290
2291 void do_efscfuf (void)
2292 {
2293 T0_64 = _do_efscfuf(T0_64);
2294 }
2295
2296 void do_efsctsf (void)
2297 {
2298 T0_64 = _do_efsctsf(T0_64);
2299 }
2300
2301 void do_efsctuf (void)
2302 {
2303 T0_64 = _do_efsctuf(T0_64);
2304 }
2305
2306 void do_efsctsfz (void)
2307 {
2308 T0_64 = _do_efsctsfz(T0_64);
2309 }
2310
2311 void do_efsctufz (void)
2312 {
2313 T0_64 = _do_efsctufz(T0_64);
2314 }
2315
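/* Editor's sketch (not part of the original file): the "fractional"
 * conversions above interpret a 32-bit operand as a fixed-point value
 * scaled by 2^32, i.e. efscfsf computes val / 2^32 and efsctsf computes
 * val * 2^32 (with NaN inputs mapped to 0). The hypothetical helpers
 * below illustrate the same arithmetic with plain host floats instead
 * of the softfloat library.
 */
#if 0 /* illustration only */
static float example_float_from_signed_fractional (int32_t frac)
{
    /* mirrors _do_efscfsf: convert, then divide by 2^32 */
    return (float)frac / 4294967296.0f;
}

static int32_t example_signed_fractional_from_float (float f)
{
    /* mirrors _do_efsctsf: multiply by 2^32, then convert (NaN -> 0) */
    if (isnan(f))
        return 0;
    return (int32_t)(f * 4294967296.0f);
}
#endif
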
2316 /* Double precision floating-point comparisons */
2317 static always_inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
2318 {
2319 /* XXX: TODO: test special values (NaN, infinities, ...) */
2320 return _do_efdtstlt(op1, op2);
2321 }
2322
2323 static always_inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
2324 {
2325 /* XXX: TODO: test special values (NaN, infinities, ...) */
2326 return _do_efdtstgt(op1, op2);
2327 }
2328
2329 static always_inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
2330 {
2331 /* XXX: TODO: test special values (NaN, infinities, ...) */
2332 return _do_efdtsteq(op1, op2);
2333 }
2334
2335 void do_efdcmplt (void)
2336 {
2337 T0 = _do_efdcmplt(T0_64, T1_64);
2338 }
2339
2340 void do_efdcmpgt (void)
2341 {
2342 T0 = _do_efdcmpgt(T0_64, T1_64);
2343 }
2344
2345 void do_efdcmpeq (void)
2346 {
2347 T0 = _do_efdcmpeq(T0_64, T1_64);
2348 }
2349
2350 /* Double precision floating-point conversion to/from integer */
2351 static always_inline uint64_t _do_efdcfsi (int64_t val)
2352 {
2353 union {
2354 uint64_t u;
2355 float64 f;
2356 } u;
2357
2358 u.f = int64_to_float64(val, &env->spe_status);
2359
2360 return u.u;
2361 }
2362
2363 static always_inline uint64_t _do_efdcfui (uint64_t val)
2364 {
2365 union {
2366 uint64_t u;
2367 float64 f;
2368 } u;
2369
2370 u.f = uint64_to_float64(val, &env->spe_status);
2371
2372 return u.u;
2373 }
2374
2375 static always_inline int64_t _do_efdctsi (uint64_t val)
2376 {
2377 union {
2378 int64_t u;
2379 float64 f;
2380 } u;
2381
2382 u.u = val;
2383 /* NaNs are not handled the way IEEE 754 specifies */
2384 if (unlikely(isnan(u.f)))
2385 return 0;
2386
2387 return float64_to_int64(u.f, &env->spe_status);
2388 }
2389
2390 static always_inline uint64_t _do_efdctui (uint64_t val)
2391 {
2392 union {
2393 int64_t u;
2394 float64 f;
2395 } u;
2396
2397 u.u = val;
2398 /* NaNs are not handled the way IEEE 754 specifies */
2399 if (unlikely(isnan(u.f)))
2400 return 0;
2401
2402 return float64_to_uint64(u.f, &env->spe_status);
2403 }
2404
2405 static always_inline int64_t _do_efdctsiz (uint64_t val)
2406 {
2407 union {
2408 int64_t u;
2409 float64 f;
2410 } u;
2411
2412 u.u = val;
2413 /* NaNs are not handled the way IEEE 754 specifies */
2414 if (unlikely(isnan(u.f)))
2415 return 0;
2416
2417 return float64_to_int64_round_to_zero(u.f, &env->spe_status);
2418 }
2419
2420 static always_inline uint64_t _do_efdctuiz (uint64_t val)
2421 {
2422 union {
2423 int64_t u;
2424 float64 f;
2425 } u;
2426
2427 u.u = val;
2428 /* NaNs are not handled the way IEEE 754 specifies */
2429 if (unlikely(isnan(u.f)))
2430 return 0;
2431
2432 return float64_to_uint64_round_to_zero(u.f, &env->spe_status);
2433 }
2434
2435 void do_efdcfsi (void)
2436 {
2437 T0_64 = _do_efdcfsi(T0_64);
2438 }
2439
2440 void do_efdcfui (void)
2441 {
2442 T0_64 = _do_efdcfui(T0_64);
2443 }
2444
2445 void do_efdctsi (void)
2446 {
2447 T0_64 = _do_efdctsi(T0_64);
2448 }
2449
2450 void do_efdctui (void)
2451 {
2452 T0_64 = _do_efdctui(T0_64);
2453 }
2454
2455 void do_efdctsiz (void)
2456 {
2457 T0_64 = _do_efdctsiz(T0_64);
2458 }
2459
2460 void do_efdctuiz (void)
2461 {
2462 T0_64 = _do_efdctuiz(T0_64);
2463 }
2464
2465 /* Double precision floating-point conversion to/from fractional */
2466 static always_inline uint64_t _do_efdcfsf (int64_t val)
2467 {
2468 union {
2469 uint64_t u;
2470 float64 f;
2471 } u;
2472 float64 tmp;
2473
2474 u.f = int32_to_float64(val, &env->spe_status);
2475 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2476 u.f = float64_div(u.f, tmp, &env->spe_status);
2477
2478 return u.u;
2479 }
2480
2481 static always_inline uint64_t _do_efdcfuf (uint64_t val)
2482 {
2483 union {
2484 uint64_t u;
2485 float64 f;
2486 } u;
2487 float64 tmp;
2488
2489 u.f = uint32_to_float64(val, &env->spe_status);
2490 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2491 u.f = float64_div(u.f, tmp, &env->spe_status);
2492
2493 return u.u;
2494 }
2495
2496 static always_inline int64_t _do_efdctsf (uint64_t val)
2497 {
2498 union {
2499 int64_t u;
2500 float64 f;
2501 } u;
2502 float64 tmp;
2503
2504 u.u = val;
2505 /* NaNs are not handled the way IEEE 754 specifies */
2506 if (unlikely(isnan(u.f)))
2507 return 0;
2508 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2509 u.f = float64_mul(u.f, tmp, &env->spe_status);
2510
2511 return float64_to_int32(u.f, &env->spe_status);
2512 }
2513
2514 static always_inline uint64_t _do_efdctuf (uint64_t val)
2515 {
2516 union {
2517 int64_t u;
2518 float64 f;
2519 } u;
2520 float64 tmp;
2521
2522 u.u = val;
2523 /* NaNs are not handled the way IEEE 754 specifies */
2524 if (unlikely(isnan(u.f)))
2525 return 0;
2526 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2527 u.f = float64_mul(u.f, tmp, &env->spe_status);
2528
2529 return float64_to_uint32(u.f, &env->spe_status);
2530 }
2531
2532 static always_inline int64_t _do_efdctsfz (uint64_t val)
2533 {
2534 union {
2535 int64_t u;
2536 float64 f;
2537 } u;
2538 float64 tmp;
2539
2540 u.u = val;
2541 /* NaNs are not handled the way IEEE 754 specifies */
2542 if (unlikely(isnan(u.f)))
2543 return 0;
2544 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2545 u.f = float64_mul(u.f, tmp, &env->spe_status);
2546
2547 return float64_to_int32_round_to_zero(u.f, &env->spe_status);
2548 }
2549
2550 static always_inline uint64_t _do_efdctufz (uint64_t val)
2551 {
2552 union {
2553 int64_t u;
2554 float64 f;
2555 } u;
2556 float64 tmp;
2557
2558 u.u = val;
2559 /* NaNs are not handled the way IEEE 754 specifies */
2560 if (unlikely(isnan(u.f)))
2561 return 0;
2562 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2563 u.f = float64_mul(u.f, tmp, &env->spe_status);
2564
2565 return float64_to_uint32_round_to_zero(u.f, &env->spe_status);
2566 }
2567
2568 void do_efdcfsf (void)
2569 {
2570 T0_64 = _do_efdcfsf(T0_64);
2571 }
2572
2573 void do_efdcfuf (void)
2574 {
2575 T0_64 = _do_efdcfuf(T0_64);
2576 }
2577
2578 void do_efdctsf (void)
2579 {
2580 T0_64 = _do_efdctsf(T0_64);
2581 }
2582
2583 void do_efdctuf (void)
2584 {
2585 T0_64 = _do_efdctuf(T0_64);
2586 }
2587
2588 void do_efdctsfz (void)
2589 {
2590 T0_64 = _do_efdctsfz(T0_64);
2591 }
2592
2593 void do_efdctufz (void)
2594 {
2595 T0_64 = _do_efdctufz(T0_64);
2596 }
2597
2598 /* Floating point conversion between single and double precision */
2599 static always_inline uint32_t _do_efscfd (uint64_t val)
2600 {
2601 union {
2602 uint64_t u;
2603 float64 f;
2604 } u1;
2605 union {
2606 uint32_t u;
2607 float32 f;
2608 } u2;
2609
2610 u1.u = val;
2611 u2.f = float64_to_float32(u1.f, &env->spe_status);
2612
2613 return u2.u;
2614 }
2615
2616 static always_inline uint64_t _do_efdcfs (uint32_t val)
2617 {
2618 union {
2619 uint64_t u;
2620 float64 f;
2621 } u2;
2622 union {
2623 uint32_t u;
2624 float32 f;
2625 } u1;
2626
2627 u1.u = val;
2628 u2.f = float32_to_float64(u1.f, &env->spe_status);
2629
2630 return u2.u;
2631 }
2632
2633 void do_efscfd (void)
2634 {
2635 T0_64 = _do_efscfd(T0_64);
2636 }
2637
2638 void do_efdcfs (void)
2639 {
2640 T0_64 = _do_efdcfs(T0_64);
2641 }
2642
2643 /* Single precision floating-point vector arithmetic */
2644 /* evfsabs */
2645 DO_SPE_OP1(fsabs);
2646 /* evfsnabs */
2647 DO_SPE_OP1(fsnabs);
2648 /* evfsneg */
2649 DO_SPE_OP1(fsneg);
2650 /* evfsadd */
2651 DO_SPE_OP2(fsadd);
2652 /* evfssub */
2653 DO_SPE_OP2(fssub);
2654 /* evfsmul */
2655 DO_SPE_OP2(fsmul);
2656 /* evfsdiv */
2657 DO_SPE_OP2(fsdiv);
2658
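/* Editor's note: DO_SPE_OP1/DO_SPE_OP2 are defined earlier in this file
 * (not shown in this excerpt). As used here they presumably expand to
 * vector wrappers that apply the corresponding scalar _do_e* helper to
 * the high and low 32-bit halves of T0_64 (and T1_64). A rough,
 * hypothetical sketch of such an expansion, for illustration only:
 */
#if 0 /* illustrative sketch, not the actual macro */
#define EXAMPLE_SPE_OP2(name)                                               \
void example_ev##name (void)                                                \
{                                                                           \
    /* apply the scalar helper to each 32-bit lane of the 64-bit vector */  \
    T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) |       \
            (uint64_t)(uint32_t)_do_e##name(T0_64, T1_64);                  \
}
#endif
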
2659 /* Single-precision floating-point comparisons */
2660 static always_inline int _do_efscmplt (uint32_t op1, uint32_t op2)
2661 {
2662 /* XXX: TODO: test special values (NaN, infinities, ...) */
2663 return _do_efststlt(op1, op2);
2664 }
2665
2666 static always_inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
2667 {
2668 /* XXX: TODO: test special values (NaN, infinities, ...) */
2669 return _do_efststgt(op1, op2);
2670 }
2671
2672 static always_inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
2673 {
2674 /* XXX: TODO: test special values (NaN, infinities, ...) */
2675 return _do_efststeq(op1, op2);
2676 }
2677
2678 void do_efscmplt (void)
2679 {
2680 T0 = _do_efscmplt(T0_64, T1_64);
2681 }
2682
2683 void do_efscmpgt (void)
2684 {
2685 T0 = _do_efscmpgt(T0_64, T1_64);
2686 }
2687
2688 void do_efscmpeq (void)
2689 {
2690 T0 = _do_efscmpeq(T0_64, T1_64);
2691 }
2692
2693 /* Single-precision floating-point vector comparisons */
2694 /* evfscmplt */
2695 DO_SPE_CMP(fscmplt);
2696 /* evfscmpgt */
2697 DO_SPE_CMP(fscmpgt);
2698 /* evfscmpeq */
2699 DO_SPE_CMP(fscmpeq);
2700 /* evfststlt */
2701 DO_SPE_CMP(fststlt);
2702 /* evfststgt */
2703 DO_SPE_CMP(fststgt);
2704 /* evfststeq */
2705 DO_SPE_CMP(fststeq);
2706
2707 /* Single-precision floating-point vector conversions */
2708 /* evfscfsi */
2709 DO_SPE_OP1(fscfsi);
2710 /* evfscfui */
2711 DO_SPE_OP1(fscfui);
2712 /* evfscfuf */
2713 DO_SPE_OP1(fscfuf);
2714 /* evfscfsf */
2715 DO_SPE_OP1(fscfsf);
2716 /* evfsctsi */
2717 DO_SPE_OP1(fsctsi);
2718 /* evfsctui */
2719 DO_SPE_OP1(fsctui);
2720 /* evfsctsiz */
2721 DO_SPE_OP1(fsctsiz);
2722 /* evfsctuiz */
2723 DO_SPE_OP1(fsctuiz);
2724 /* evfsctsf */
2725 DO_SPE_OP1(fsctsf);
2726 /* evfsctuf */
2727 DO_SPE_OP1(fsctuf);
2728 #endif /* defined(TARGET_PPCEMB) */
2729
2730 /*****************************************************************************/
2731 /* Softmmu support */
2732 #if !defined (CONFIG_USER_ONLY)
2733
2734 #define MMUSUFFIX _mmu
2735 #define GETPC() (__builtin_return_address(0))
2736
2737 #define SHIFT 0
2738 #include "softmmu_template.h"
2739
2740 #define SHIFT 1
2741 #include "softmmu_template.h"
2742
2743 #define SHIFT 2
2744 #include "softmmu_template.h"
2745
2746 #define SHIFT 3
2747 #include "softmmu_template.h"
2748
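/* Editor's note: each inclusion of "softmmu_template.h" above instantiates
 * the softmmu slow-path load/store helpers for one access size, with
 * DATA_SIZE = 1 << SHIFT bytes (SHIFT 0/1/2/3 -> 1/2/4/8-byte accesses).
 * GETPC() provides the host return address that those helpers pass as
 * retaddr, so that tlb_fill() below can locate the faulting translation
 * block and restore the guest CPU state.
 */
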
2749 /* Try to fill the TLB and raise an exception on error. If retaddr is
2750 NULL, the function was called from C code (i.e. not from generated
2751 code or from helper.c) */
2752 /* XXX: fix it to restore all registers */
2753 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2754 {
2755 TranslationBlock *tb;
2756 CPUState *saved_env;
2757 target_phys_addr_t pc;
2758 int ret;
2759
2760 /* XXX: hack to restore env in all cases, even if not called from
2761 generated code */
2762 saved_env = env;
2763 env = cpu_single_env;
2764 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2765 if (unlikely(ret != 0)) {
2766 if (likely(retaddr)) {
2767 /* now we have a real cpu fault */
2768 pc = (target_phys_addr_t)(unsigned long)retaddr;
2769 tb = tb_find_pc(pc);
2770 if (likely(tb)) {
2771 /* the PC is inside the translated code; this means we have
2772 a virtual CPU fault */
2773 cpu_restore_state(tb, env, pc, NULL);
2774 }
2775 }
2776 do_raise_exception_err(env->exception_index, env->error_code);
2777 }
2778 env = saved_env;
2779 }
2780
2781 /* Software-driven TLB management */
2782 /* PowerPC 602/603 software TLB load instruction helpers */
2783 void do_load_6xx_tlb (int is_code)
2784 {
2785 target_ulong RPN, CMP, EPN;
2786 int way;
2787
2788 RPN = env->spr[SPR_RPA];
2789 if (is_code) {
2790 CMP = env->spr[SPR_ICMP];
2791 EPN = env->spr[SPR_IMISS];
2792 } else {
2793 CMP = env->spr[SPR_DCMP];
2794 EPN = env->spr[SPR_DMISS];
2795 }
2796 way = (env->spr[SPR_SRR1] >> 17) & 1;
2797 #if defined (DEBUG_SOFTWARE_TLB)
2798 if (loglevel != 0) {
2799 fprintf(logfile, "%s: EPN %08lx %08lx PTE0 %08lx PTE1 %08lx way %d\n",
2800 __func__, (unsigned long)T0, (unsigned long)EPN,
2801 (unsigned long)CMP, (unsigned long)RPN, way);
2802 }
2803 #endif
2804 /* Store this TLB */
2805 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2806 way, is_code, CMP, RPN);
2807 }
2808
2809 void do_load_74xx_tlb (int is_code)
2810 {
2811 target_ulong RPN, CMP, EPN;
2812 int way;
2813
2814 RPN = env->spr[SPR_PTELO];
2815 CMP = env->spr[SPR_PTEHI];
2816 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2817 way = env->spr[SPR_TLBMISS] & 0x3;
2818 #if defined (DEBUG_SOFTWARE_TLB)
2819 if (loglevel != 0) {
2820 fprintf(logfile, "%s: EPN %08lx %08lx PTE0 %08lx PTE1 %08lx way %d\n",
2821 __func__, (unsigned long)T0, (unsigned long)EPN,
2822 (unsigned long)CMP, (unsigned long)RPN, way);
2823 }
2824 #endif
2825 /* Store this TLB */
2826 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2827 way, is_code, CMP, RPN);
2828 }
2829
2830 static always_inline target_ulong booke_tlb_to_page_size (int size)
2831 {
2832 return 1024 << (2 * size);
2833 }
2834
2835 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2836 {
2837 int size;
2838
2839 switch (page_size) {
2840 case 0x00000400UL:
2841 size = 0x0;
2842 break;
2843 case 0x00001000UL:
2844 size = 0x1;
2845 break;
2846 case 0x00004000UL:
2847 size = 0x2;
2848 break;
2849 case 0x00010000UL:
2850 size = 0x3;
2851 break;
2852 case 0x00040000UL:
2853 size = 0x4;
2854 break;
2855 case 0x00100000UL:
2856 size = 0x5;
2857 break;
2858 case 0x00400000UL:
2859 size = 0x6;
2860 break;
2861 case 0x01000000UL:
2862 size = 0x7;
2863 break;
2864 case 0x04000000UL:
2865 size = 0x8;
2866 break;
2867 case 0x10000000UL:
2868 size = 0x9;
2869 break;
2870 case 0x40000000UL:
2871 size = 0xA;
2872 break;
2873 #if defined (TARGET_PPC64)
2874 case 0x000100000000ULL:
2875 size = 0xB;
2876 break;
2877 case 0x000400000000ULL:
2878 size = 0xC;
2879 break;
2880 case 0x001000000000ULL:
2881 size = 0xD;
2882 break;
2883 case 0x004000000000ULL:
2884 size = 0xE;
2885 break;
2886 case 0x010000000000ULL:
2887 size = 0xF;
2888 break;
2889 #endif
2890 default:
2891 size = -1;
2892 break;
2893 }
2894
2895 return size;
2896 }
2897
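/* Editor's sketch (illustration only): the two helpers above encode the
 * BookE TLB size field as page_size = 1 KiB * 4^size, so for every valid
 * encoding the round trip is the identity, e.g. (assuming <assert.h>):
 */
#if 0
static void example_booke_size_roundtrip (void)
{
    /* size code 0x3 corresponds to a 64 KiB page */
    assert(booke_tlb_to_page_size(0x3) == 0x00010000UL);
    assert(booke_page_size_to_tlb(0x00010000UL) == 0x3);
}
#endif
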
2898 /* Helpers for 4xx TLB management */
2899 void do_4xx_tlbre_lo (void)
2900 {
2901 ppcemb_tlb_t *tlb;
2902 int size;
2903
2904 T0 &= 0x3F;
2905 tlb = &env->tlb[T0].tlbe;
2906 T0 = tlb->EPN;
2907 if (tlb->prot & PAGE_VALID)
2908 T0 |= 0x400;
2909 size = booke_page_size_to_tlb(tlb->size);
2910 if (size < 0 || size > 0x7)
2911 size = 1;
2912 T0 |= size << 7;
2913 env->spr[SPR_40x_PID] = tlb->PID;
2914 }
2915
2916 void do_4xx_tlbre_hi (void)
2917 {
2918 ppcemb_tlb_t *tlb;
2919
2920 T0 &= 0x3F;
2921 tlb = &env->tlb[T0].tlbe;
2922 T0 = tlb->RPN;
2923 if (tlb->prot & PAGE_EXEC)
2924 T0 |= 0x200;
2925 if (tlb->prot & PAGE_WRITE)
2926 T0 |= 0x100;
2927 }
2928
2929 void do_4xx_tlbwe_hi (void)
2930 {
2931 ppcemb_tlb_t *tlb;
2932 target_ulong page, end;
2933
2934 #if defined (DEBUG_SOFTWARE_TLB)
2935 if (loglevel != 0) {
2936 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
2937 }
2938 #endif
2939 T0 &= 0x3F;
2940 tlb = &env->tlb[T0].tlbe;
2941 /* Invalidate previous TLB (if it's valid) */
2942 if (tlb->prot & PAGE_VALID) {
2943 end = tlb->EPN + tlb->size;
2944 #if defined (DEBUG_SOFTWARE_TLB)
2945 if (loglevel != 0) {
2946 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2947 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2948 }
2949 #endif
2950 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2951 tlb_flush_page(env, page);
2952 }
2953 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2954 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2955 * If this ever occurs, one should use the ppcemb target instead
2956 * of the ppc or ppc64 one
2957 */
2958 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2959 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2960 "are not supported (%d)\n",
2961 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2962 }
2963 tlb->EPN = T1 & ~(tlb->size - 1);
2964 if (T1 & 0x40)
2965 tlb->prot |= PAGE_VALID;
2966 else
2967 tlb->prot &= ~PAGE_VALID;
2968 if (T1 & 0x20) {
2969 /* XXX: TO BE FIXED */
2970 cpu_abort(env, "Little-endian TLB entries are not supported yet\n");
2971 }
2972 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2973 tlb->attr = T1 & 0xFF;
2974 #if defined (DEBUG_SOFTWARE_TLB)
2975 if (loglevel != 0) {
2976 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2977 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2978 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2979 tlb->prot & PAGE_READ ? 'r' : '-',
2980 tlb->prot & PAGE_WRITE ? 'w' : '-',
2981 tlb->prot & PAGE_EXEC ? 'x' : '-',
2982 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2983 }
2984 #endif
2985 /* Invalidate new TLB (if valid) */
2986 if (tlb->prot & PAGE_VALID) {
2987 end = tlb->EPN + tlb->size;
2988 #if defined (DEBUG_SOFTWARE_TLB)
2989 if (loglevel != 0) {
2990 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2991 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2992 }
2993 #endif
2994 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2995 tlb_flush_page(env, page);
2996 }
2997 }
2998
2999 void do_4xx_tlbwe_lo (void)
3000 {
3001 ppcemb_tlb_t *tlb;
3002
3003 #if defined (DEBUG_SOFTWARE_TLB)
3004 if (loglevel != 0) {
3005 fprintf(logfile, "%s T0 " REGX " T1 " REGX "\n", __func__, T0, T1);
3006 }
3007 #endif
3008 T0 &= 0x3F;
3009 tlb = &env->tlb[T0].tlbe;
3010 tlb->RPN = T1 & 0xFFFFFC00;
3011 tlb->prot = PAGE_READ;
3012 if (T1 & 0x200)
3013 tlb->prot |= PAGE_EXEC;
3014 if (T1 & 0x100)
3015 tlb->prot |= PAGE_WRITE;
3016 #if defined (DEBUG_SOFTWARE_TLB)
3017 if (loglevel != 0) {
3018 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
3019 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
3020 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
3021 tlb->prot & PAGE_READ ? 'r' : '-',
3022 tlb->prot & PAGE_WRITE ? 'w' : '-',
3023 tlb->prot & PAGE_EXEC ? 'x' : '-',
3024 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
3025 }
3026 #endif
3027 }
3028
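/* Editor's note (derived from the helpers above): the do_4xx_tlbwe_hi /
 * do_4xx_tlbre_lo pair handles the entry word holding the EPN, a 3-bit
 * size field at bit 7, the valid flag and the storage attributes, while
 * the do_4xx_tlbwe_lo / do_4xx_tlbre_hi pair handles the word holding the
 * RPN (mask 0xFFFFFC00) plus the execute (0x200) and write (0x100)
 * permission bits; the PID is kept in SPR_40x_PID rather than in the
 * entry words themselves.
 */
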
3029 /* PowerPC 440 TLB management */
3030 void do_440_tlbwe (int word)
3031 {
3032 ppcemb_tlb_t *tlb;
3033 target_ulong EPN, RPN, size;
3034 int do_flush_tlbs;
3035
3036 #if defined (DEBUG_SOFTWARE_TLB)
3037 if (loglevel != 0) {
3038 fprintf(logfile, "%s word %d T0 " REGX " T1 " REGX "\n",
3039 __func__, word, T0, T1);
3040 }
3041 #endif
3042 do_flush_tlbs = 0;
3043 T0 &= 0x3F;
3044 tlb = &env->tlb[T0].tlbe;
3045 switch (word) {
3046 default:
3047 /* Just here to please gcc */
3048 case 0:
3049 EPN = T1 & 0xFFFFFC00;
3050 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
3051 do_flush_tlbs = 1;
3052 tlb->EPN = EPN;
3053 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
3054 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3055 do_flush_tlbs = 1;
3056 tlb->size = size;
3057 tlb->attr &= ~0x1;
3058 tlb->attr |= (T1 >> 8) & 1;
3059 if (T1 & 0x200) {
3060 tlb->prot |= PAGE_VALID;
3061 } else {
3062 if (tlb->prot & PAGE_VALID) {
3063 tlb->prot &= ~PAGE_VALID;
3064 do_flush_tlbs = 1;
3065 }
3066 }
3067 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3068 if (do_flush_tlbs)
3069 tlb_flush(env, 1);
3070 break;
3071 case 1:
3072 RPN = T1 & 0xFFFFFC0F;
3073 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3074 tlb_flush(env, 1);
3075 tlb->RPN = RPN;
3076 break;
3077 case 2:
3078 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
3079 tlb->prot = tlb->prot & PAGE_VALID;
3080 if (T1 & 0x1)
3081 tlb->prot |= PAGE_READ << 4;
3082 if (T1 & 0x2)
3083 tlb->prot |= PAGE_WRITE << 4;
3084 if (T1 & 0x4)
3085 tlb->prot |= PAGE_EXEC << 4;
3086 if (T1 & 0x8)
3087 tlb->prot |= PAGE_READ;
3088 if (T1 & 0x10)
3089 tlb->prot |= PAGE_WRITE;
3090 if (T1 & 0x20)
3091 tlb->prot |= PAGE_EXEC;
3092 break;
3093 }
3094 }
3095
3096 void do_440_tlbre (int word)
3097 {
3098 ppcemb_tlb_t *tlb;
3099 int size;
3100
3101 T0 &= 0x3F;
3102 tlb = &env->tlb[T0].tlbe;
3103 switch (word) {
3104 default:
3105 /* Just here to please gcc */
3106 case 0:
3107 T0 = tlb->EPN;
3108 size = booke_page_size_to_tlb(tlb->size);
3109 if (size < 0 || size > 0xF)
3110 size = 1;
3111 T0 |= size << 4;
3112 if (tlb->attr & 0x1)
3113 T0 |= 0x100;
3114 if (tlb->prot & PAGE_VALID)
3115 T0 |= 0x200;
3116 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3117 env->spr[SPR_440_MMUCR] |= tlb->PID;
3118 break;
3119 case 1:
3120 T0 = tlb->RPN;
3121 break;
3122 case 2:
3123 T0 = tlb->attr & ~0x1;
3124 if (tlb->prot & (PAGE_READ << 4))
3125 T0 |= 0x1;
3126 if (tlb->prot & (PAGE_WRITE << 4))
3127 T0 |= 0x2;
3128 if (tlb->prot & (PAGE_EXEC << 4))
3129 T0 |= 0x4;
3130 if (tlb->prot & PAGE_READ)
3131 T0 |= 0x8;
3132 if (tlb->prot & PAGE_WRITE)
3133 T0 |= 0x10;
3134 if (tlb->prot & PAGE_EXEC)
3135 T0 |= 0x20;
3136 break;
3137 }
3138 }
3139 #endif /* !CONFIG_USER_ONLY */