target-ppc: convert lscbx instruction to TCG
[qemu.git] / target-ppc / op_helper.c
1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
23
24 #include "helper_regs.h"
25 #include "op_helper.h"
26
27 //#define DEBUG_OP
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
34 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
35 {
36 raise_exception_err(env, exception, error_code);
37 }
38
39 void helper_raise_debug (void)
40 {
41 raise_exception(env, EXCP_DEBUG);
42 }
43
44 /*****************************************************************************/
45 /* Register loads and stores */
46 target_ulong helper_load_cr (void)
47 {
48 return (env->crf[0] << 28) |
49 (env->crf[1] << 24) |
50 (env->crf[2] << 20) |
51 (env->crf[3] << 16) |
52 (env->crf[4] << 12) |
53 (env->crf[5] << 8) |
54 (env->crf[6] << 4) |
55 (env->crf[7] << 0);
56 }
57
58 void helper_store_cr (target_ulong val, uint32_t mask)
59 {
60 int i, sh;
61
62 for (i = 0, sh = 7; i < 8; i++, sh--) {
63 if (mask & (1 << sh))
64 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
65 }
66 }
67
68 #if defined(TARGET_PPC64)
69 void do_store_pri (int prio)
70 {
71 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
72 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
73 }
74 #endif
75
76 target_ulong ppc_load_dump_spr (int sprn)
77 {
78 if (loglevel != 0) {
79 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
80 sprn, sprn, env->spr[sprn]);
81 }
82
83 return env->spr[sprn];
84 }
85
86 void ppc_store_dump_spr (int sprn, target_ulong val)
87 {
88 if (loglevel != 0) {
89 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
90 sprn, sprn, env->spr[sprn], val);
91 }
92 env->spr[sprn] = val;
93 }
94
95 /*****************************************************************************/
96 /* Memory loads and stores */
97
98 static always_inline target_ulong get_addr(target_ulong addr)
99 {
100 #if defined(TARGET_PPC64)
101 if (msr_sf)
102 return addr;
103 else
104 #endif
105 return (uint32_t)addr;
106 }
107
108 void helper_lmw (target_ulong addr, uint32_t reg)
109 {
110 #ifdef CONFIG_USER_ONLY
111 #define ldfun ldl_raw
112 #else
113 int (*ldfun)(target_ulong);
114
115 switch (env->mmu_idx) {
116 default:
117 case 0: ldfun = ldl_user;
118 break;
119 case 1: ldfun = ldl_kernel;
120 break;
121 case 2: ldfun = ldl_hypv;
122 break;
123 }
124 #endif
125 for (; reg < 32; reg++, addr += 4) {
126 if (msr_le)
127 env->gpr[reg] = bswap32(ldfun(get_addr(addr)));
128 else
129 env->gpr[reg] = ldfun(get_addr(addr));
130 }
131 }
132
133 void helper_stmw (target_ulong addr, uint32_t reg)
134 {
135 #ifdef CONFIG_USER_ONLY
136 #define stfun stl_raw
137 #else
138 void (*stfun)(target_ulong, int);
139
140 switch (env->mmu_idx) {
141 default:
142 case 0: stfun = stl_user;
143 break;
144 case 1: stfun = stl_kernel;
145 break;
146 case 2: stfun = stl_hypv;
147 break;
148 }
149 #endif
150 for (; reg < 32; reg++, addr += 4) {
151 if (msr_le)
152 stfun(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
153 else
154 stfun(get_addr(addr), (uint32_t)env->gpr[reg]);
155 }
156 }
157
158 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
159 {
160 int sh;
161 #ifdef CONFIG_USER_ONLY
162 #define ldfunl ldl_raw
163 #define ldfunb ldub_raw
164 #else
165 int (*ldfunl)(target_ulong);
166 int (*ldfunb)(target_ulong);
167
168 switch (env->mmu_idx) {
169 default:
170 case 0:
171 ldfunl = ldl_user;
172 ldfunb = ldub_user;
173 break;
174 case 1:
175 ldfunl = ldl_kernel;
176 ldfunb = ldub_kernel;
177 break;
178 case 2:
179 ldfunl = ldl_hypv;
180 ldfunb = ldub_hypv;
181 break;
182 }
183 #endif
184 for (; nb > 3; nb -= 4, addr += 4) {
185 env->gpr[reg] = ldfunl(get_addr(addr));
186 reg = (reg + 1) % 32;
187 }
188 if (unlikely(nb > 0)) {
189 env->gpr[reg] = 0;
190 for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
191 env->gpr[reg] |= ldfunb(get_addr(addr)) << sh;
192 }
193 }
194 }
195 /* PPC32 specification says we must generate an exception if
196 * rA is in the range of registers to be loaded.
197 * On the other hand, IBM says this is valid, but rA won't be loaded.
198 * For now, I'll follow the spec...
199 */
200 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
201 {
202 if (likely(xer_bc != 0)) {
203 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
204 (reg < rb && (reg + xer_bc) > rb))) {
205 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
206 POWERPC_EXCP_INVAL |
207 POWERPC_EXCP_INVAL_LSWX);
208 } else {
209 helper_lsw(addr, xer_bc, reg);
210 }
211 }
212 }
213
214 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
215 {
216 int sh;
217 #ifdef CONFIG_USER_ONLY
218 #define stfunl stl_raw
219 #define stfunb stb_raw
220 #else
221 void (*stfunl)(target_ulong, int);
222 void (*stfunb)(target_ulong, int);
223
224 switch (env->mmu_idx) {
225 default:
226 case 0:
227 stfunl = stl_user;
228 stfunb = stb_user;
229 break;
230 case 1:
231 stfunl = stl_kernel;
232 stfunb = stb_kernel;
233 break;
234 case 2:
235 stfunl = stl_hypv;
236 stfunb = stb_hypv;
237 break;
238 }
239 #endif
240
241 for (; nb > 3; nb -= 4, addr += 4) {
242 stfunl(get_addr(addr), env->gpr[reg]);
243 reg = (reg + 1) % 32;
244 }
245 if (unlikely(nb > 0)) {
246 for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
247 stfunb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
248 }
249 }
250
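/* Note: dcbz clears one data cache line by storing zero word by word and
 * cancels any lwarx/ldarx reservation that falls inside the cleared line.
 */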
251 static void do_dcbz(target_ulong addr, int dcache_line_size)
252 {
253 target_long mask = get_addr(~(dcache_line_size - 1));
254 int i;
255 #ifdef CONFIG_USER_ONLY
256 #define stfun stl_raw
257 #else
258 void (*stfun)(target_ulong, int);
259
260 switch (env->mmu_idx) {
261 default:
262 case 0: stfun = stl_user;
263 break;
264 case 1: stfun = stl_kernel;
265 break;
266 case 2: stfun = stl_hypv;
267 break;
268 }
269 #endif
270 addr &= mask;
271 for (i = 0 ; i < dcache_line_size ; i += 4) {
272 stfun(addr + i , 0);
273 }
274 if ((env->reserve & mask) == addr)
275 env->reserve = (target_ulong)-1ULL;
276 }
277
278 void helper_dcbz(target_ulong addr)
279 {
280 do_dcbz(addr, env->dcache_line_size);
281 }
282
283 void helper_dcbz_970(target_ulong addr)
284 {
285 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
286 do_dcbz(addr, 32);
287 else
288 do_dcbz(addr, env->dcache_line_size);
289 }
290
291 void helper_icbi(target_ulong addr)
292 {
293 uint32_t tmp;
294
295 addr = get_addr(addr & ~(env->dcache_line_size - 1));
296 /* Invalidate one cache line :
297 * PowerPC specification says this is to be treated like a load
298 * (not a fetch) by the MMU. To be sure it will be so,
299 * do the load "by hand".
300 */
301 #ifdef CONFIG_USER_ONLY
302 tmp = ldl_raw(addr);
303 #else
304 switch (env->mmu_idx) {
305 default:
306 case 0: tmp = ldl_user(addr);
307 break;
308 case 1: tmp = ldl_kernel(addr);
309 break;
310 case 2: tmp = ldl_hypv(addr);
311 break;
312 }
313 #endif
314 tb_invalidate_page_range(addr, addr + env->icache_line_size);
315 }
316
317 // XXX: to be tested
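// lscbx loads up to xer_bc bytes from addr into successive GPRs (four per
// register, packed from the most significant byte, wrapping r31 -> r0),
// skips rb and a non-zero ra as destinations, stops once a byte equal to
// xer_cmp has been loaded, and returns its final loop index for the caller.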
318 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
319 {
320 int i, c, d;
321 #ifdef CONFIG_USER_ONLY
322 #define ldfun ldub_raw
323 #else
324 int (*ldfun)(target_ulong);
325
326 switch (env->mmu_idx) {
327 default:
328 case 0: ldfun = ldub_user;
329 break;
330 case 1: ldfun = ldub_kernel;
331 break;
332 case 2: ldfun = ldub_hypv;
333 break;
334 }
335 #endif
336 d = 24;
337 for (i = 0; i < xer_bc; i++) {
338 c = ldfun((uint32_t)addr++);
339 /* ra (if not 0) and rb are never modified */
340 if (likely(reg != rb && (ra == 0 || reg != ra))) {
341 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
342 }
343 if (unlikely(c == xer_cmp))
344 break;
345 if (likely(d != 0)) {
346 d -= 8;
347 } else {
348 d = 24;
349 reg++;
350 reg = reg & 0x1F;
351 }
352 }
353 return i;
354 }
355
356 /*****************************************************************************/
357 /* Fixed point operations helpers */
358 #if defined(TARGET_PPC64)
359
360 /* multiply high word */
361 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
362 {
363 uint64_t tl, th;
364
365 muls64(&tl, &th, arg1, arg2);
366 return th;
367 }
368
369 /* multiply high word unsigned */
370 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
371 {
372 uint64_t tl, th;
373
374 mulu64(&tl, &th, arg1, arg2);
375 return th;
376 }
377
378 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
379 {
380 int64_t th;
381 uint64_t tl;
382
383 muls64(&tl, (uint64_t *)&th, arg1, arg2);
384 /* If th != 0 && th != -1, then we had an overflow */
385 if (likely((uint64_t)(th + 1) <= 1)) {
386 env->xer &= ~(1 << XER_OV);
387 } else {
388 env->xer |= (1 << XER_OV) | (1 << XER_SO);
389 }
390 return (int64_t)tl;
391 }
392 #endif
393
394 target_ulong helper_cntlzw (target_ulong t)
395 {
396 return clz32(t);
397 }
398
399 #if defined(TARGET_PPC64)
400 target_ulong helper_cntlzd (target_ulong t)
401 {
402 return clz64(t);
403 }
404 #endif
405
406 /* shift right arithmetic helper */
407 target_ulong helper_sraw (target_ulong value, target_ulong shift)
408 {
409 int32_t ret;
410
411 if (likely(!(shift & 0x20))) {
412 if (likely((uint32_t)shift != 0)) {
413 shift &= 0x1f;
414 ret = (int32_t)value >> shift;
415 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
416 env->xer &= ~(1 << XER_CA);
417 } else {
418 env->xer |= (1 << XER_CA);
419 }
420 } else {
421 ret = (int32_t)value;
422 env->xer &= ~(1 << XER_CA);
423 }
424 } else {
425 ret = (int32_t)value >> 31;
426 if (ret) {
427 env->xer |= (1 << XER_CA);
428 } else {
429 env->xer &= ~(1 << XER_CA);
430 }
431 }
432 return (target_long)ret;
433 }
434
435 #if defined(TARGET_PPC64)
436 target_ulong helper_srad (target_ulong value, target_ulong shift)
437 {
438 int64_t ret;
439
440 if (likely(!(shift & 0x40))) {
441 if (likely((uint64_t)shift != 0)) {
442 shift &= 0x3f;
443 ret = (int64_t)value >> shift;
444 if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
445 env->xer &= ~(1 << XER_CA);
446 } else {
447 env->xer |= (1 << XER_CA);
448 }
449 } else {
450 ret = (int64_t)value;
451 env->xer &= ~(1 << XER_CA);
452 }
453 } else {
454 ret = (int64_t)value >> 63;
455 if (ret) {
456 env->xer |= (1 << XER_CA);
457 } else {
458 env->xer &= ~(1 << XER_CA);
459 }
460 }
461 return ret;
462 }
463 #endif
464
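/* popcntb: per-byte population count using the classic SWAR reduction -
 * the three masked add steps fold neighbouring bit, pair and nibble counts
 * so that each byte of the result holds the number of set bits in the
 * corresponding input byte.
 */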
465 target_ulong helper_popcntb (target_ulong val)
466 {
467 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
468 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
469 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
470 return val;
471 }
472
473 #if defined(TARGET_PPC64)
474 target_ulong helper_popcntb_64 (target_ulong val)
475 {
476 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
477 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
478 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
479 return val;
480 }
481 #endif
482
483 /*****************************************************************************/
484 /* Floating point operations helpers */
485 uint64_t helper_float32_to_float64(uint32_t arg)
486 {
487 CPU_FloatU f;
488 CPU_DoubleU d;
489 f.l = arg;
490 d.d = float32_to_float64(f.f, &env->fp_status);
491 return d.ll;
492 }
493
494 uint32_t helper_float64_to_float32(uint64_t arg)
495 {
496 CPU_FloatU f;
497 CPU_DoubleU d;
498 d.ll = arg;
499 f.f = float64_to_float32(d.d, &env->fp_status);
500 return f.l;
501 }
502
503 static always_inline int fpisneg (float64 d)
504 {
505 CPU_DoubleU u;
506
507 u.d = d;
508
509 return u.ll >> 63 != 0;
510 }
511
512 static always_inline int isden (float64 d)
513 {
514 CPU_DoubleU u;
515
516 u.d = d;
517
518 return ((u.ll >> 52) & 0x7FF) == 0;
519 }
520
521 static always_inline int iszero (float64 d)
522 {
523 CPU_DoubleU u;
524
525 u.d = d;
526
527 return (u.ll & ~0x8000000000000000ULL) == 0;
528 }
529
530 static always_inline int isinfinity (float64 d)
531 {
532 CPU_DoubleU u;
533
534 u.d = d;
535
536 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
537 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
538 }
539
540 #ifdef CONFIG_SOFTFLOAT
541 static always_inline int isfinite (float64 d)
542 {
543 CPU_DoubleU u;
544
545 u.d = d;
546
547 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
548 }
549
550 static always_inline int isnormal (float64 d)
551 {
552 CPU_DoubleU u;
553
554 u.d = d;
555
556 uint32_t exp = (u.ll >> 52) & 0x7FF;
557 return ((0 < exp) && (exp < 0x7FF));
558 }
559 #endif
560
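/* Compute the 5-bit class code (C bit plus FPCC) for a 64-bit value,
 * e.g. 0x11 for a quiet NaN, 0x05 for +infinity, 0x12 for -zero.
 */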
561 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
562 {
563 CPU_DoubleU farg;
564 int isneg;
565 int ret;
566 farg.ll = arg;
567 isneg = fpisneg(farg.d);
568 if (unlikely(float64_is_nan(farg.d))) {
569 if (float64_is_signaling_nan(farg.d)) {
570 /* Signaling NaN: flags are undefined */
571 ret = 0x00;
572 } else {
573 /* Quiet NaN */
574 ret = 0x11;
575 }
576 } else if (unlikely(isinfinity(farg.d))) {
577 /* +/- infinity */
578 if (isneg)
579 ret = 0x09;
580 else
581 ret = 0x05;
582 } else {
583 if (iszero(farg.d)) {
584 /* +/- zero */
585 if (isneg)
586 ret = 0x12;
587 else
588 ret = 0x02;
589 } else {
590 if (isden(farg.d)) {
591 /* Denormalized numbers */
592 ret = 0x10;
593 } else {
594 /* Normalized numbers */
595 ret = 0x00;
596 }
597 if (isneg) {
598 ret |= 0x08;
599 } else {
600 ret |= 0x04;
601 }
602 }
603 }
604 if (set_fprf) {
605 /* We update FPSCR_FPRF */
606 env->fpscr &= ~(0x1F << FPSCR_FPRF);
607 env->fpscr |= ret << FPSCR_FPRF;
608 }
609 /* We just need fpcc to update Rc1 */
610 return ret & 0xF;
611 }
612
613 /* Floating-point invalid operations exception */
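/* Sets the matching FPSCR[VX*] bit, updates the VX/FX (and, when enabled,
 * FEX) summaries, and for the arithmetic/conversion cases returns the
 * all-ones pattern (a quiet NaN) as the default target-FPR result when
 * the invalid-operation exception is disabled.
 */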
614 static always_inline uint64_t fload_invalid_op_excp (int op)
615 {
616 uint64_t ret = 0;
617 int ve;
618
619 ve = fpscr_ve;
620 if (op & POWERPC_EXCP_FP_VXSNAN) {
621 /* Operation on signaling NaN */
622 env->fpscr |= 1 << FPSCR_VXSNAN;
623 }
624 if (op & POWERPC_EXCP_FP_VXSOFT) {
625 /* Software-defined condition */
626 env->fpscr |= 1 << FPSCR_VXSOFT;
627 }
628 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
629 case POWERPC_EXCP_FP_VXISI:
630 /* Magnitude subtraction of infinities */
631 env->fpscr |= 1 << FPSCR_VXISI;
632 goto update_arith;
633 case POWERPC_EXCP_FP_VXIDI:
634 /* Division of infinity by infinity */
635 env->fpscr |= 1 << FPSCR_VXIDI;
636 goto update_arith;
637 case POWERPC_EXCP_FP_VXZDZ:
638 /* Division of zero by zero */
639 env->fpscr |= 1 << FPSCR_VXZDZ;
640 goto update_arith;
641 case POWERPC_EXCP_FP_VXIMZ:
642 /* Multiplication of zero by infinity */
643 env->fpscr |= 1 << FPSCR_VXIMZ;
644 goto update_arith;
645 case POWERPC_EXCP_FP_VXVC:
646 /* Ordered comparison of NaN */
647 env->fpscr |= 1 << FPSCR_VXVC;
648 env->fpscr &= ~(0xF << FPSCR_FPCC);
649 env->fpscr |= 0x11 << FPSCR_FPCC;
650 /* We must update the target FPR before raising the exception */
651 if (ve != 0) {
652 env->exception_index = POWERPC_EXCP_PROGRAM;
653 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
654 /* Update the floating-point enabled exception summary */
655 env->fpscr |= 1 << FPSCR_FEX;
656 /* Exception is deferred */
657 ve = 0;
658 }
659 break;
660 case POWERPC_EXCP_FP_VXSQRT:
661 /* Square root of a negative number */
662 env->fpscr |= 1 << FPSCR_VXSQRT;
663 update_arith:
664 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
665 if (ve == 0) {
666 /* Set the result to quiet NaN */
667 ret = UINT64_MAX;
668 env->fpscr &= ~(0xF << FPSCR_FPCC);
669 env->fpscr |= 0x11 << FPSCR_FPCC;
670 }
671 break;
672 case POWERPC_EXCP_FP_VXCVI:
673 /* Invalid conversion */
674 env->fpscr |= 1 << FPSCR_VXCVI;
675 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
676 if (ve == 0) {
677 /* Set the result to quiet NaN */
678 ret = UINT64_MAX;
679 env->fpscr &= ~(0xF << FPSCR_FPCC);
680 env->fpscr |= 0x11 << FPSCR_FPCC;
681 }
682 break;
683 }
684 /* Update the floating-point invalid operation summary */
685 env->fpscr |= 1 << FPSCR_VX;
686 /* Update the floating-point exception summary */
687 env->fpscr |= 1 << FPSCR_FX;
688 if (ve != 0) {
689 /* Update the floating-point enabled exception summary */
690 env->fpscr |= 1 << FPSCR_FEX;
691 if (msr_fe0 != 0 || msr_fe1 != 0)
692 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
693 }
694 return ret;
695 }
696
697 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
698 {
699 env->fpscr |= 1 << FPSCR_ZX;
700 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
701 /* Update the floating-point exception summary */
702 env->fpscr |= 1 << FPSCR_FX;
703 if (fpscr_ze != 0) {
704 /* Update the floating-point enabled exception summary */
705 env->fpscr |= 1 << FPSCR_FEX;
706 if (msr_fe0 != 0 || msr_fe1 != 0) {
707 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
708 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
709 }
710 } else {
711 /* Set the result to infinity */
712 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
713 arg1 |= 0x7FFULL << 52;
714 }
715 return arg1;
716 }
717
718 static always_inline void float_overflow_excp (void)
719 {
720 env->fpscr |= 1 << FPSCR_OX;
721 /* Update the floating-point exception summary */
722 env->fpscr |= 1 << FPSCR_FX;
723 if (fpscr_oe != 0) {
724 /* XXX: should adjust the result */
725 /* Update the floating-point enabled exception summary */
726 env->fpscr |= 1 << FPSCR_FEX;
727 /* We must update the target FPR before raising the exception */
728 env->exception_index = POWERPC_EXCP_PROGRAM;
729 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
730 } else {
731 env->fpscr |= 1 << FPSCR_XX;
732 env->fpscr |= 1 << FPSCR_FI;
733 }
734 }
735
736 static always_inline void float_underflow_excp (void)
737 {
738 env->fpscr |= 1 << FPSCR_UX;
739 /* Update the floating-point exception summary */
740 env->fpscr |= 1 << FPSCR_FX;
741 if (fpscr_ue != 0) {
742 /* XXX: should adjust the result */
743 /* Update the floating-point enabled exception summary */
744 env->fpscr |= 1 << FPSCR_FEX;
745 /* We must update the target FPR before raising the exception */
746 env->exception_index = POWERPC_EXCP_PROGRAM;
747 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
748 }
749 }
750
751 static always_inline void float_inexact_excp (void)
752 {
753 env->fpscr |= 1 << FPSCR_XX;
754 /* Update the floating-point exception summary */
755 env->fpscr |= 1 << FPSCR_FX;
756 if (fpscr_xe != 0) {
757 /* Update the floating-point enabled exception summary */
758 env->fpscr |= 1 << FPSCR_FEX;
759 /* We must update the target FPR before raising the exception */
760 env->exception_index = POWERPC_EXCP_PROGRAM;
761 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
762 }
763 }
764
765 static always_inline void fpscr_set_rounding_mode (void)
766 {
767 int rnd_type;
768
769 /* Set rounding mode */
770 switch (fpscr_rn) {
771 case 0:
772 /* Best approximation (round to nearest) */
773 rnd_type = float_round_nearest_even;
774 break;
775 case 1:
776 /* Smaller magnitude (round toward zero) */
777 rnd_type = float_round_to_zero;
778 break;
779 case 2:
780 /* Round toward +infinity */
781 rnd_type = float_round_up;
782 break;
783 default:
784 case 3:
785 /* Round toward -infinity */
786 rnd_type = float_round_down;
787 break;
788 }
789 set_float_rounding_mode(rnd_type, &env->fp_status);
790 }
791
792 void helper_fpscr_setbit (uint32_t bit)
793 {
794 int prev;
795
796 prev = (env->fpscr >> bit) & 1;
797 env->fpscr |= 1 << bit;
798 if (prev == 0) {
799 switch (bit) {
800 case FPSCR_VX:
801 env->fpscr |= 1 << FPSCR_FX;
802 if (fpscr_ve)
803 goto raise_ve;
804 case FPSCR_OX:
805 env->fpscr |= 1 << FPSCR_FX;
806 if (fpscr_oe)
807 goto raise_oe;
808 break;
809 case FPSCR_UX:
810 env->fpscr |= 1 << FPSCR_FX;
811 if (fpscr_ue)
812 goto raise_ue;
813 break;
814 case FPSCR_ZX:
815 env->fpscr |= 1 << FPSCR_FX;
816 if (fpscr_ze)
817 goto raise_ze;
818 break;
819 case FPSCR_XX:
820 env->fpscr |= 1 << FPSCR_FX;
821 if (fpscr_xe)
822 goto raise_xe;
823 break;
824 case FPSCR_VXSNAN:
825 case FPSCR_VXISI:
826 case FPSCR_VXIDI:
827 case FPSCR_VXZDZ:
828 case FPSCR_VXIMZ:
829 case FPSCR_VXVC:
830 case FPSCR_VXSOFT:
831 case FPSCR_VXSQRT:
832 case FPSCR_VXCVI:
833 env->fpscr |= 1 << FPSCR_VX;
834 env->fpscr |= 1 << FPSCR_FX;
835 if (fpscr_ve != 0)
836 goto raise_ve;
837 break;
838 case FPSCR_VE:
839 if (fpscr_vx != 0) {
840 raise_ve:
841 env->error_code = POWERPC_EXCP_FP;
842 if (fpscr_vxsnan)
843 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
844 if (fpscr_vxisi)
845 env->error_code |= POWERPC_EXCP_FP_VXISI;
846 if (fpscr_vxidi)
847 env->error_code |= POWERPC_EXCP_FP_VXIDI;
848 if (fpscr_vxzdz)
849 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
850 if (fpscr_vximz)
851 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
852 if (fpscr_vxvc)
853 env->error_code |= POWERPC_EXCP_FP_VXVC;
854 if (fpscr_vxsoft)
855 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
856 if (fpscr_vxsqrt)
857 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
858 if (fpscr_vxcvi)
859 env->error_code |= POWERPC_EXCP_FP_VXCVI;
860 goto raise_excp;
861 }
862 break;
863 case FPSCR_OE:
864 if (fpscr_ox != 0) {
865 raise_oe:
866 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
867 goto raise_excp;
868 }
869 break;
870 case FPSCR_UE:
871 if (fpscr_ux != 0) {
872 raise_ue:
873 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
874 goto raise_excp;
875 }
876 break;
877 case FPSCR_ZE:
878 if (fpscr_zx != 0) {
879 raise_ze:
880 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
881 goto raise_excp;
882 }
883 break;
884 case FPSCR_XE:
885 if (fpscr_xx != 0) {
886 raise_xe:
887 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
888 goto raise_excp;
889 }
890 break;
891 case FPSCR_RN1:
892 case FPSCR_RN:
893 fpscr_set_rounding_mode();
894 break;
895 default:
896 break;
897 raise_excp:
898 /* Update the floating-point enabled exception summary */
899 env->fpscr |= 1 << FPSCR_FEX;
900 /* We have to update Rc1 before raising the exception */
901 env->exception_index = POWERPC_EXCP_PROGRAM;
902 break;
903 }
904 }
905 }
906
907 void helper_store_fpscr (uint64_t arg, uint32_t mask)
908 {
909 /*
910 * We use only the 32 LSB of the incoming fpr
911 */
912 uint32_t prev, new;
913 int i;
914
915 prev = env->fpscr;
916 new = (uint32_t)arg;
917 new &= ~0x90000000;
918 new |= prev & 0x90000000;
919 for (i = 0; i < 7; i++) {
920 if (mask & (1 << i)) {
921 env->fpscr &= ~(0xF << (4 * i));
922 env->fpscr |= new & (0xF << (4 * i));
923 }
924 }
925 /* Update VX and FEX */
926 if (fpscr_ix != 0)
927 env->fpscr |= 1 << FPSCR_VX;
928 else
929 env->fpscr &= ~(1 << FPSCR_VX);
930 if ((fpscr_ex & fpscr_eex) != 0) {
931 env->fpscr |= 1 << FPSCR_FEX;
932 env->exception_index = POWERPC_EXCP_PROGRAM;
933 /* XXX: we should compute it properly */
934 env->error_code = POWERPC_EXCP_FP;
935 }
936 else
937 env->fpscr &= ~(1 << FPSCR_FEX);
938 fpscr_set_rounding_mode();
939 }
940
941 void helper_float_check_status (void)
942 {
943 #ifdef CONFIG_SOFTFLOAT
944 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
945 (env->error_code & POWERPC_EXCP_FP)) {
946 /* Deferred floating-point exception after target FPR update */
947 if (msr_fe0 != 0 || msr_fe1 != 0)
948 raise_exception_err(env, env->exception_index, env->error_code);
949 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
950 float_overflow_excp();
951 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
952 float_underflow_excp();
953 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
954 float_inexact_excp();
955 }
956 #else
957 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
958 (env->error_code & POWERPC_EXCP_FP)) {
959 /* Deferred floating-point exception after target FPR update */
960 if (msr_fe0 != 0 || msr_fe1 != 0)
961 raise_exception_err(env, env->exception_index, env->error_code);
962 }
963 RETURN();
964 #endif
965 }
966
967 #ifdef CONFIG_SOFTFLOAT
968 void helper_reset_fpstatus (void)
969 {
970 env->fp_status.float_exception_flags = 0;
971 }
972 #endif
973
974 /* fadd - fadd. */
975 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
976 {
977 CPU_DoubleU farg1, farg2;
978
979 farg1.ll = arg1;
980 farg2.ll = arg2;
981 #if USE_PRECISE_EMULATION
982 if (unlikely(float64_is_signaling_nan(farg1.d) ||
983 float64_is_signaling_nan(farg2.d))) {
984 /* sNaN addition */
985 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
986 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
987 fpisneg(farg1.d) == fpisneg(farg2.d))) {
988 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
989 } else {
990 /* Magnitude subtraction of infinities */
991 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
992 }
993 #else
994 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
995 #endif
996 return farg1.ll;
997 }
998
999 /* fsub - fsub. */
1000 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1001 {
1002 CPU_DoubleU farg1, farg2;
1003
1004 farg1.ll = arg1;
1005 farg2.ll = arg2;
1006 #if USE_PRECISE_EMULATION
1007 {
1008 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1009 float64_is_signaling_nan(farg2.d))) {
1010 /* sNaN subtraction */
1011 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1012 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
1013 fpisneg(farg1.d) != fpisneg(farg2.d))) {
1014 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1015 } else {
1016 /* Magnitude subtraction of infinities */
1017 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1018 }
1019 }
1020 #else
1021 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1022 #endif
1023 return farg1.ll;
1024 }
1025
1026 /* fmul - fmul. */
1027 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
1028 {
1029 CPU_DoubleU farg1, farg2;
1030
1031 farg1.ll = arg1;
1032 farg2.ll = arg2;
1033 #if USE_PRECISE_EMULATION
1034 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1035 float64_is_signaling_nan(farg2.d))) {
1036 /* sNaN multiplication */
1037 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1038 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
1039 (iszero(farg1.d) && isinfinity(farg2.d)))) {
1040 /* Multiplication of zero by infinity */
1041 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
1042 } else {
1043 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1044 }
1046 #else
1047 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1048 #endif
1049 return farg1.ll;
1050 }
1051
1052 /* fdiv - fdiv. */
1053 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
1054 {
1055 CPU_DoubleU farg1, farg2;
1056
1057 farg1.ll = arg1;
1058 farg2.ll = arg2;
1059 #if USE_PRECISE_EMULATION
1060 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1061 float64_is_signaling_nan(farg2.d))) {
1062 /* sNaN division */
1063 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1064 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
1065 /* Division of infinity by infinity */
1066 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
1067 } else if (unlikely(iszero(farg2.d))) {
1068 if (iszero(farg1.d)) {
1069 /* Division of zero by zero */
1070 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
1071 } else {
1072 /* Division by zero */
1073 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
1074 }
1075 } else {
1076 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1077 }
1078 #else
1079 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1080 #endif
1081 return farg1.ll;
1082 }
1083
1084 /* fabs */
1085 uint64_t helper_fabs (uint64_t arg)
1086 {
1087 CPU_DoubleU farg;
1088
1089 farg.ll = arg;
1090 farg.d = float64_abs(farg.d);
1091 return farg.ll;
1092 }
1093
1094 /* fnabs */
1095 uint64_t helper_fnabs (uint64_t arg)
1096 {
1097 CPU_DoubleU farg;
1098
1099 farg.ll = arg;
1100 farg.d = float64_abs(farg.d);
1101 farg.d = float64_chs(farg.d);
1102 return farg.ll;
1103 }
1104
1105 /* fneg */
1106 uint64_t helper_fneg (uint64_t arg)
1107 {
1108 CPU_DoubleU farg;
1109
1110 farg.ll = arg;
1111 farg.d = float64_chs(farg.d);
1112 return farg.ll;
1113 }
1114
1115 /* fctiw - fctiw. */
1116 uint64_t helper_fctiw (uint64_t arg)
1117 {
1118 CPU_DoubleU farg;
1119 farg.ll = arg;
1120
1121 if (unlikely(float64_is_signaling_nan(farg.d))) {
1122 /* sNaN conversion */
1123 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1124 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1125 /* qNan / infinity conversion */
1126 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1127 } else {
1128 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1129 #if USE_PRECISE_EMULATION
1130 /* XXX: higher bits are not supposed to be significant.
1131 * to make tests easier, return the same as a real PowerPC 750
1132 */
1133 farg.ll |= 0xFFF80000ULL << 32;
1134 #endif
1135 }
1136 return farg.ll;
1137 }
1138
1139 /* fctiwz - fctiwz. */
1140 uint64_t helper_fctiwz (uint64_t arg)
1141 {
1142 CPU_DoubleU farg;
1143 farg.ll = arg;
1144
1145 if (unlikely(float64_is_signaling_nan(farg.d))) {
1146 /* sNaN conversion */
1147 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1148 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1149 /* qNan / infinity conversion */
1150 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1151 } else {
1152 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1153 #if USE_PRECISE_EMULATION
1154 /* XXX: higher bits are not supposed to be significant.
1155 * to make tests easier, return the same as a real PowerPC 750
1156 */
1157 farg.ll |= 0xFFF80000ULL << 32;
1158 #endif
1159 }
1160 return farg.ll;
1161 }
1162
1163 #if defined(TARGET_PPC64)
1164 /* fcfid - fcfid. */
1165 uint64_t helper_fcfid (uint64_t arg)
1166 {
1167 CPU_DoubleU farg;
1168 farg.d = int64_to_float64(arg, &env->fp_status);
1169 return farg.ll;
1170 }
1171
1172 /* fctid - fctid. */
1173 uint64_t helper_fctid (uint64_t arg)
1174 {
1175 CPU_DoubleU farg;
1176 farg.ll = arg;
1177
1178 if (unlikely(float64_is_signaling_nan(farg.d))) {
1179 /* sNaN conversion */
1180 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1181 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1182 /* qNan / infinity conversion */
1183 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1184 } else {
1185 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1186 }
1187 return farg.ll;
1188 }
1189
1190 /* fctidz - fctidz. */
1191 uint64_t helper_fctidz (uint64_t arg)
1192 {
1193 CPU_DoubleU farg;
1194 farg.ll = arg;
1195
1196 if (unlikely(float64_is_signaling_nan(farg.d))) {
1197 /* sNaN conversion */
1198 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1199 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1200 /* qNan / infinity conversion */
1201 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1202 } else {
1203 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1204 }
1205 return farg.ll;
1206 }
1207
1208 #endif
1209
1210 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1211 {
1212 CPU_DoubleU farg;
1213 farg.ll = arg;
1214
1215 if (unlikely(float64_is_signaling_nan(farg.d))) {
1216 /* sNaN round */
1217 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1218 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1219 /* qNan / infinity round */
1220 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1221 } else {
1222 set_float_rounding_mode(rounding_mode, &env->fp_status);
1223 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1224 /* Restore rounding mode from FPSCR */
1225 fpscr_set_rounding_mode();
1226 }
1227 return farg.ll;
1228 }
1229
1230 uint64_t helper_frin (uint64_t arg)
1231 {
1232 return do_fri(arg, float_round_nearest_even);
1233 }
1234
1235 uint64_t helper_friz (uint64_t arg)
1236 {
1237 return do_fri(arg, float_round_to_zero);
1238 }
1239
1240 uint64_t helper_frip (uint64_t arg)
1241 {
1242 return do_fri(arg, float_round_up);
1243 }
1244
1245 uint64_t helper_frim (uint64_t arg)
1246 {
1247 return do_fri(arg, float_round_down);
1248 }
1249
1250 /* fmadd - fmadd. */
1251 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1252 {
1253 CPU_DoubleU farg1, farg2, farg3;
1254
1255 farg1.ll = arg1;
1256 farg2.ll = arg2;
1257 farg3.ll = arg3;
1258 #if USE_PRECISE_EMULATION
1259 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1260 float64_is_signaling_nan(farg2.d) ||
1261 float64_is_signaling_nan(farg3.d))) {
1262 /* sNaN operation */
1263 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1264 } else {
1265 #ifdef FLOAT128
1266 /* This is the way the PowerPC specification defines it */
1267 float128 ft0_128, ft1_128;
1268
1269 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1270 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1271 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1272 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1273 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1274 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1275 #else
1276 /* This is OK on x86 hosts */
1277 farg1.d = (farg1.d * farg2.d) + farg3.d;
1278 #endif
1279 }
1280 #else
1281 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1282 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1283 #endif
1284 return farg1.ll;
1285 }
1286
1287 /* fmsub - fmsub. */
1288 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1289 {
1290 CPU_DoubleU farg1, farg2, farg3;
1291
1292 farg1.ll = arg1;
1293 farg2.ll = arg2;
1294 farg3.ll = arg3;
1295 #if USE_PRECISE_EMULATION
1296 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1297 float64_is_signaling_nan(farg2.d) ||
1298 float64_is_signaling_nan(farg3.d))) {
1299 /* sNaN operation */
1300 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1301 } else {
1302 #ifdef FLOAT128
1303 /* This is the way the PowerPC specification defines it */
1304 float128 ft0_128, ft1_128;
1305
1306 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1307 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1308 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1309 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1310 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1311 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1312 #else
1313 /* This is OK on x86 hosts */
1314 farg1.d = (farg1.d * farg2.d) - farg3.d;
1315 #endif
1316 }
1317 #else
1318 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1319 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1320 #endif
1321 return farg1.ll;
1322 }
1323
1324 /* fnmadd - fnmadd. */
1325 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1326 {
1327 CPU_DoubleU farg1, farg2, farg3;
1328
1329 farg1.ll = arg1;
1330 farg2.ll = arg2;
1331 farg3.ll = arg3;
1332
1333 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1334 float64_is_signaling_nan(farg2.d) ||
1335 float64_is_signaling_nan(farg3.d))) {
1336 /* sNaN operation */
1337 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1338 } else {
1339 #if USE_PRECISE_EMULATION
1340 #ifdef FLOAT128
1341 /* This is the way the PowerPC specification defines it */
1342 float128 ft0_128, ft1_128;
1343
1344 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1345 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1346 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1347 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1348 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1349 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1350 #else
1351 /* This is OK on x86 hosts */
1352 farg1.d = (farg1.d * farg2.d) + farg3.d;
1353 #endif
1354 #else
1355 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1356 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1357 #endif
1358 if (likely(!isnan(farg1.d)))
1359 farg1.d = float64_chs(farg1.d);
1360 }
1361 return farg1.ll;
1362 }
1363
1364 /* fnmsub - fnmsub. */
1365 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1366 {
1367 CPU_DoubleU farg1, farg2, farg3;
1368
1369 farg1.ll = arg1;
1370 farg2.ll = arg2;
1371 farg3.ll = arg3;
1372
1373 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1374 float64_is_signaling_nan(farg2.d) ||
1375 float64_is_signaling_nan(farg3.d))) {
1376 /* sNaN operation */
1377 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1378 } else {
1379 #if USE_PRECISE_EMULATION
1380 #ifdef FLOAT128
1381 /* This is the way the PowerPC specification defines it */
1382 float128 ft0_128, ft1_128;
1383
1384 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1385 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1386 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1387 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1388 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1389 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1390 #else
1391 /* This is OK on x86 hosts */
1392 farg1.d = (farg1.d * farg2.d) - farg3.d;
1393 #endif
1394 #else
1395 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1396 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1397 #endif
1398 if (likely(!isnan(farg1.d)))
1399 farg1.d = float64_chs(farg1.d);
1400 }
1401 return farg1.ll;
1402 }
1403
1404 /* frsp - frsp. */
1405 uint64_t helper_frsp (uint64_t arg)
1406 {
1407 CPU_DoubleU farg;
1408 farg.ll = arg;
1409
1410 #if USE_PRECISE_EMULATION
1411 if (unlikely(float64_is_signaling_nan(farg.d))) {
1412 /* sNaN round */
1413 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1414 } else {
1415 farg.d = float64_to_float32(farg.d, &env->fp_status);
1416 }
1417 #else
1418 farg.d = float64_to_float32(farg.d, &env->fp_status);
1419 #endif
1420 return farg.ll;
1421 }
1422
1423 /* fsqrt - fsqrt. */
1424 uint64_t helper_fsqrt (uint64_t arg)
1425 {
1426 CPU_DoubleU farg;
1427 farg.ll = arg;
1428
1429 if (unlikely(float64_is_signaling_nan(farg.d))) {
1430 /* sNaN square root */
1431 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1432 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1433 /* Square root of a negative nonzero number */
1434 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1435 } else {
1436 farg.d = float64_sqrt(farg.d, &env->fp_status);
1437 }
1438 return farg.ll;
1439 }
1440
1441 /* fre - fre. */
1442 uint64_t helper_fre (uint64_t arg)
1443 {
1444 CPU_DoubleU farg;
1445 farg.ll = arg;
1446
1447 if (unlikely(float64_is_signaling_nan(farg.d))) {
1448 /* sNaN reciprocal */
1449 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1450 } else if (unlikely(iszero(farg.d))) {
1451 /* Zero reciprocal */
1452 farg.ll = float_zero_divide_excp(1.0, farg.d);
1453 } else if (likely(isnormal(farg.d))) {
1454 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1455 } else {
1456 if (farg.ll == 0x8000000000000000ULL) {
1457 farg.ll = 0xFFF0000000000000ULL;
1458 } else if (farg.ll == 0x0000000000000000ULL) {
1459 farg.ll = 0x7FF0000000000000ULL;
1460 } else if (isnan(farg.d)) {
1461 farg.ll = 0x7FF8000000000000ULL;
1462 } else if (fpisneg(farg.d)) {
1463 farg.ll = 0x8000000000000000ULL;
1464 } else {
1465 farg.ll = 0x0000000000000000ULL;
1466 }
1467 }
1468 return farg.ll;
1469 }
1470
1471 /* fres - fres. */
1472 uint64_t helper_fres (uint64_t arg)
1473 {
1474 CPU_DoubleU farg;
1475 farg.ll = arg;
1476
1477 if (unlikely(float64_is_signaling_nan(farg.d))) {
1478 /* sNaN reciprocal */
1479 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1480 } else if (unlikely(iszero(farg.d))) {
1481 /* Zero reciprocal */
1482 farg.ll = float_zero_divide_excp(1.0, farg.d);
1483 } else if (likely(isnormal(farg.d))) {
1484 #if USE_PRECISE_EMULATION
1485 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1486 farg.d = float64_to_float32(farg.d, &env->fp_status);
1487 #else
1488 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1489 #endif
1490 } else {
1491 if (farg.ll == 0x8000000000000000ULL) {
1492 farg.ll = 0xFFF0000000000000ULL;
1493 } else if (farg.ll == 0x0000000000000000ULL) {
1494 farg.ll = 0x7FF0000000000000ULL;
1495 } else if (isnan(farg.d)) {
1496 farg.ll = 0x7FF8000000000000ULL;
1497 } else if (fpisneg(farg.d)) {
1498 farg.ll = 0x8000000000000000ULL;
1499 } else {
1500 farg.ll = 0x0000000000000000ULL;
1501 }
1502 }
1503 return farg.ll;
1504 }
1505
1506 /* frsqrte - frsqrte. */
1507 uint64_t helper_frsqrte (uint64_t arg)
1508 {
1509 CPU_DoubleU farg;
1510 farg.ll = arg;
1511
1512 if (unlikely(float64_is_signaling_nan(farg.d))) {
1513 /* sNaN reciprocal square root */
1514 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1515 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1516 /* Reciprocal square root of a negative nonzero number */
1517 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1518 } else if (likely(isnormal(farg.d))) {
1519 farg.d = float64_sqrt(farg.d, &env->fp_status);
1520 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1521 } else {
1522 if (farg.ll == 0x8000000000000000ULL) {
1523 farg.ll = 0xFFF0000000000000ULL;
1524 } else if (farg.ll == 0x0000000000000000ULL) {
1525 farg.ll = 0x7FF0000000000000ULL;
1526 } else if (isnan(farg.d)) {
1527 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1528 } else if (fpisneg(farg.d)) {
1529 farg.ll = 0x7FF8000000000000ULL;
1530 } else {
1531 farg.ll = 0x0000000000000000ULL;
1532 }
1533 }
1534 return farg.ll;
1535 }
1536
1537 /* fsel - fsel. */
1538 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1539 {
1540 CPU_DoubleU farg1, farg2, farg3;
1541
1542 farg1.ll = arg1;
1543 farg2.ll = arg2;
1544 farg3.ll = arg3;
1545
1546 if (!fpisneg(farg1.d) || iszero(farg1.d))
1547 return farg2.ll;
1548 else
1549 return farg3.ll;
1550 }
1551
1552 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1553 {
1554 CPU_DoubleU farg1, farg2;
1555 uint32_t ret = 0;
1556 farg1.ll = arg1;
1557 farg2.ll = arg2;
1558
1559 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1560 float64_is_signaling_nan(farg2.d))) {
1561 /* sNaN comparison */
1562 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1563 } else {
1564 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1565 ret = 0x08UL;
1566 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1567 ret = 0x04UL;
1568 } else {
1569 ret = 0x02UL;
1570 }
1571 }
1572 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1573 env->fpscr |= ret << FPSCR_FPRF;
1574 return ret;
1575 }
1576
1577 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1578 {
1579 CPU_DoubleU farg1, farg2;
1580 uint32_t ret = 0;
1581 farg1.ll = arg1;
1582 farg2.ll = arg2;
1583
1584 if (unlikely(float64_is_nan(farg1.d) ||
1585 float64_is_nan(farg2.d))) {
1586 if (float64_is_signaling_nan(farg1.d) ||
1587 float64_is_signaling_nan(farg2.d)) {
1588 /* sNaN comparison */
1589 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1590 POWERPC_EXCP_FP_VXVC);
1591 } else {
1592 /* qNaN comparison */
1593 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1594 }
1595 } else {
1596 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1597 ret = 0x08UL;
1598 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1599 ret = 0x04UL;
1600 } else {
1601 ret = 0x02UL;
1602 }
1603 }
1604 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1605 env->fpscr |= ret << FPSCR_FPRF;
1606 return ret;
1607 }
1608
1609 #if !defined (CONFIG_USER_ONLY)
1610 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1611
1612 void do_store_msr (void)
1613 {
1614 T0 = hreg_store_msr(env, T0, 0);
1615 if (T0 != 0) {
1616 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1617 raise_exception(env, T0);
1618 }
1619 }
1620
1621 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1622 target_ulong msrm, int keep_msrh)
1623 {
1624 #if defined(TARGET_PPC64)
1625 if (msr & (1ULL << MSR_SF)) {
1626 nip = (uint64_t)nip;
1627 msr &= (uint64_t)msrm;
1628 } else {
1629 nip = (uint32_t)nip;
1630 msr = (uint32_t)(msr & msrm);
1631 if (keep_msrh)
1632 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1633 }
1634 #else
1635 nip = (uint32_t)nip;
1636 msr &= (uint32_t)msrm;
1637 #endif
1638 /* XXX: beware: this is false if VLE is supported */
1639 env->nip = nip & ~((target_ulong)0x00000003);
1640 hreg_store_msr(env, msr, 1);
1641 #if defined (DEBUG_OP)
1642 cpu_dump_rfi(env->nip, env->msr);
1643 #endif
1644 /* No need to raise an exception here,
1645 * as rfi is always the last insn of a TB
1646 */
1647 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1648 }
1649
1650 void do_rfi (void)
1651 {
1652 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1653 ~((target_ulong)0xFFFF0000), 1);
1654 }
1655
1656 #if defined(TARGET_PPC64)
1657 void do_rfid (void)
1658 {
1659 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1660 ~((target_ulong)0xFFFF0000), 0);
1661 }
1662
1663 void do_hrfid (void)
1664 {
1665 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1666 ~((target_ulong)0xFFFF0000), 0);
1667 }
1668 #endif
1669 #endif
1670
1671 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1672 {
1673 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1674 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1675 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1676 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1677 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1678 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1679 }
1680 }
1681
1682 #if defined(TARGET_PPC64)
1683 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1684 {
1685 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1686 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1687 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1688 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1689 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1690 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1691 }
1692 #endif
1693
1694 /*****************************************************************************/
1695 /* PowerPC 601 specific instructions (POWER bridge) */
1696 void do_POWER_abso (void)
1697 {
1698 if ((int32_t)T0 == INT32_MIN) {
1699 T0 = INT32_MAX;
1700 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1701 } else if ((int32_t)T0 < 0) {
1702 T0 = -T0;
1703 env->xer &= ~(1 << XER_OV);
1704 } else {
1705 env->xer &= ~(1 << XER_OV);
1706 }
1707 }
1708
1709 void do_POWER_clcs (void)
1710 {
1711 switch (T0) {
1712 case 0x0CUL:
1713 /* Instruction cache line size */
1714 T0 = env->icache_line_size;
1715 break;
1716 case 0x0DUL:
1717 /* Data cache line size */
1718 T0 = env->dcache_line_size;
1719 break;
1720 case 0x0EUL:
1721 /* Minimum cache line size */
1722 T0 = env->icache_line_size < env->dcache_line_size ?
1723 env->icache_line_size : env->dcache_line_size;
1724 break;
1725 case 0x0FUL:
1726 /* Maximum cache line size */
1727 T0 = env->icache_line_size > env->dcache_line_size ?
1728 env->icache_line_size : env->dcache_line_size;
1729 break;
1730 default:
1731 /* Undefined */
1732 break;
1733 }
1734 }
1735
1736 void do_POWER_div (void)
1737 {
1738 uint64_t tmp;
1739
1740 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1741 (int32_t)T1 == 0) {
1742 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1743 env->spr[SPR_MQ] = 0;
1744 } else {
1745 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1746 env->spr[SPR_MQ] = tmp % T1;
1747 T0 = tmp / (int32_t)T1;
1748 }
1749 }
1750
1751 void do_POWER_divo (void)
1752 {
1753 int64_t tmp;
1754
1755 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1756 (int32_t)T1 == 0) {
1757 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1758 env->spr[SPR_MQ] = 0;
1759 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1760 } else {
1761 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1762 env->spr[SPR_MQ] = tmp % T1;
1763 tmp /= (int32_t)T1;
1764 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1765 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1766 } else {
1767 env->xer &= ~(1 << XER_OV);
1768 }
1769 T0 = tmp;
1770 }
1771 }
1772
1773 void do_POWER_divs (void)
1774 {
1775 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1776 (int32_t)T1 == 0) {
1777 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1778 env->spr[SPR_MQ] = 0;
1779 } else {
1780 env->spr[SPR_MQ] = T0 % T1;
1781 T0 = (int32_t)T0 / (int32_t)T1;
1782 }
1783 }
1784
1785 void do_POWER_divso (void)
1786 {
1787 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1788 (int32_t)T1 == 0) {
1789 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1790 env->spr[SPR_MQ] = 0;
1791 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1792 } else {
1793 T0 = (int32_t)T0 / (int32_t)T1;
1794 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1795 env->xer &= ~(1 << XER_OV);
1796 }
1797 }
1798
1799 void do_POWER_dozo (void)
1800 {
1801 if ((int32_t)T1 > (int32_t)T0) {
1802 T2 = T0;
1803 T0 = T1 - T0;
1804 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1805 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1806 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1807 } else {
1808 env->xer &= ~(1 << XER_OV);
1809 }
1810 } else {
1811 T0 = 0;
1812 env->xer &= ~(1 << XER_OV);
1813 }
1814 }
1815
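/* maskg (POWER mask generate): builds a 32-bit mask of ones running from
 * bit T0 through bit T1 in IBM (big-endian) bit numbering, producing the
 * wrapped (inverted) mask when T0 > T1 and all ones when T0 == T1 + 1.
 */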
1816 void do_POWER_maskg (void)
1817 {
1818 uint32_t ret;
1819
1820 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1821 ret = UINT32_MAX;
1822 } else {
1823 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1824 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1825 if ((uint32_t)T0 > (uint32_t)T1)
1826 ret = ~ret;
1827 }
1828 T0 = ret;
1829 }
1830
1831 void do_POWER_mulo (void)
1832 {
1833 uint64_t tmp;
1834
1835 tmp = (uint64_t)T0 * (uint64_t)T1;
1836 env->spr[SPR_MQ] = tmp >> 32;
1837 T0 = tmp;
1838 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1839 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1840 } else {
1841 env->xer &= ~(1 << XER_OV);
1842 }
1843 }
1844
1845 #if !defined (CONFIG_USER_ONLY)
1846 void do_POWER_rac (void)
1847 {
1848 mmu_ctx_t ctx;
1849 int nb_BATs;
1850
1851 /* We don't have to generate many instances of this instruction,
1852 * as rac is supervisor only.
1853 */
1854 /* XXX: FIX THIS: Pretend we have no BAT */
1855 nb_BATs = env->nb_BATs;
1856 env->nb_BATs = 0;
1857 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1858 T0 = ctx.raddr;
1859 env->nb_BATs = nb_BATs;
1860 }
1861
1862 void do_POWER_rfsvc (void)
1863 {
1864 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1865 }
1866
1867 void do_store_hid0_601 (void)
1868 {
1869 uint32_t hid0;
1870
1871 hid0 = env->spr[SPR_HID0];
1872 if ((T0 ^ hid0) & 0x00000008) {
1873 /* Change current endianness */
1874 env->hflags &= ~(1 << MSR_LE);
1875 env->hflags_nmsr &= ~(1 << MSR_LE);
1876 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1877 env->hflags |= env->hflags_nmsr;
1878 if (loglevel != 0) {
1879 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1880 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1881 }
1882 }
1883 env->spr[SPR_HID0] = T0;
1884 }
1885 #endif
1886
1887 /*****************************************************************************/
1888 /* 602 specific instructions */
1889 /* mfrom is the most crazy instruction ever seen, imho ! */
1890 /* Real implementation uses a ROM table. Do the same */
1891 #define USE_MFROM_ROM_TABLE
1892 target_ulong helper_602_mfrom (target_ulong arg)
1893 {
1894 if (likely(arg < 602)) {
1895 #if defined(USE_MFROM_ROM_TABLE)
1896 #include "mfrom_table.c"
1897 return mfrom_ROM_table[arg];
1898 #else
1899 double d;
1900 /* Extremely decomposed:
1901 *
1902 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
1903 */
1904 d = arg;
1905 d = float64_div(d, 256, &env->fp_status);
1906 d = float64_chs(d);
1907 d = exp10(d); // XXX: use float emulation function
1908 d = float64_add(d, 1.0, &env->fp_status);
1909 d = log10(d); // XXX: use float emulation function
1910 d = float64_mul(d, 256, &env->fp_status);
1911 d = float64_add(d, 0.5, &env->fp_status);
1912 return float64_round_to_int(d, &env->fp_status);
1913 #endif
1914 } else {
1915 return 0;
1916 }
1917 }
1918
1919 /*****************************************************************************/
1920 /* Embedded PowerPC specific helpers */
1921
1922 /* XXX: to be improved to check access rights when in user-mode */
1923 void do_load_dcr (void)
1924 {
1925 target_ulong val;
1926
1927 if (unlikely(env->dcr_env == NULL)) {
1928 if (loglevel != 0) {
1929 fprintf(logfile, "No DCR environment\n");
1930 }
1931 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1932 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1933 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1934 if (loglevel != 0) {
1935 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1936 }
1937 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1938 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1939 } else {
1940 T0 = val;
1941 }
1942 }
1943
1944 void do_store_dcr (void)
1945 {
1946 if (unlikely(env->dcr_env == NULL)) {
1947 if (loglevel != 0) {
1948 fprintf(logfile, "No DCR environment\n");
1949 }
1950 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1951 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1952 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1953 if (loglevel != 0) {
1954 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1955 }
1956 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1957 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1958 }
1959 }
1960
1961 #if !defined(CONFIG_USER_ONLY)
1962 void do_40x_rfci (void)
1963 {
1964 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1965 ~((target_ulong)0xFFFF0000), 0);
1966 }
1967
1968 void do_rfci (void)
1969 {
1970 __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1971 ~((target_ulong)0x3FFF0000), 0);
1972 }
1973
1974 void do_rfdi (void)
1975 {
1976 __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1977 ~((target_ulong)0x3FFF0000), 0);
1978 }
1979
1980 void do_rfmci (void)
1981 {
1982 __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1983 ~((target_ulong)0x3FFF0000), 0);
1984 }
1985
1986 void do_load_403_pb (int num)
1987 {
1988 T0 = env->pb[num];
1989 }
1990
1991 void do_store_403_pb (int num)
1992 {
1993 if (likely(env->pb[num] != T0)) {
1994 env->pb[num] = T0;
1995 /* Should be optimized */
1996 tlb_flush(env, 1);
1997 }
1998 }
1999 #endif
2000
2001 /* 440 specific */
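/* dlmzb scans the eight bytes of T0:T1 starting from the most significant
 * byte of T0 and leaves in T0 the 1-based position of the first zero byte
 * (9 if the pair contains no zero byte).
 */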
2002 void do_440_dlmzb (void)
2003 {
2004 target_ulong mask;
2005 int i;
2006
2007 i = 1;
2008 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
2009 if ((T0 & mask) == 0)
2010 goto done;
2011 i++;
2012 }
2013 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
2014 if ((T1 & mask) == 0)
2015 break;
2016 i++;
2017 }
2018 done:
2019 T0 = i;
2020 }
2021
2022 /*****************************************************************************/
2023 /* SPE extension helpers */
2024 /* Use a table to make this quicker */
2025 static uint8_t hbrev[16] = {
2026 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
2027 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
2028 };
2029
2030 static always_inline uint8_t byte_reverse (uint8_t val)
2031 {
2032 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
2033 }
2034
2035 static always_inline uint32_t word_reverse (uint32_t val)
2036 {
2037 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
2038 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
2039 }
2040
2041 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
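/* Worked example (with the placeholder MASKBITS = 16 above): for
 * arg2 = 0x0000FFFF and arg1 = 0x00004000, the masked bits 0x4000 are
 * bit-reversed to 0x0002, incremented to 0x0003 and reversed back,
 * giving 0xC000 - the next index of a bit-reversed (FFT-style)
 * addressing sequence.
 */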
2042 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
2043 {
2044 uint32_t a, b, d, mask;
2045
2046 mask = UINT32_MAX >> (32 - MASKBITS);
2047 a = arg1 & mask;
2048 b = arg2 & mask;
2049 d = word_reverse(1 + word_reverse(a | ~b));
2050 return (arg1 & ~mask) | (d & b);
2051 }
2052
2053 uint32_t helper_cntlsw32 (uint32_t val)
2054 {
2055 if (val & 0x80000000)
2056 return clz32(~val);
2057 else
2058 return clz32(val);
2059 }
2060
2061 uint32_t helper_cntlzw32 (uint32_t val)
2062 {
2063 return clz32(val);
2064 }
2065
2066 /* Single-precision floating-point conversions */
2067 static always_inline uint32_t efscfsi (uint32_t val)
2068 {
2069 CPU_FloatU u;
2070
2071 u.f = int32_to_float32(val, &env->spe_status);
2072
2073 return u.l;
2074 }
2075
2076 static always_inline uint32_t efscfui (uint32_t val)
2077 {
2078 CPU_FloatU u;
2079
2080 u.f = uint32_to_float32(val, &env->spe_status);
2081
2082 return u.l;
2083 }
2084
2085 static always_inline int32_t efsctsi (uint32_t val)
2086 {
2087 CPU_FloatU u;
2088
2089 u.l = val;
2090 /* NaNs are not treated the way IEEE 754 specifies */
2091 if (unlikely(isnan(u.f)))
2092 return 0;
2093
2094 return float32_to_int32(u.f, &env->spe_status);
2095 }
2096
2097 static always_inline uint32_t efsctui (uint32_t val)
2098 {
2099 CPU_FloatU u;
2100
2101 u.l = val;
2102 /* NaNs are not treated the way IEEE 754 specifies */
2103 if (unlikely(isnan(u.f)))
2104 return 0;
2105
2106 return float32_to_uint32(u.f, &env->spe_status);
2107 }
2108
2109 static always_inline uint32_t efsctsiz (uint32_t val)
2110 {
2111 CPU_FloatU u;
2112
2113 u.l = val;
2114 /* NaNs are not treated the way IEEE 754 specifies */
2115 if (unlikely(isnan(u.f)))
2116 return 0;
2117
2118 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2119 }
2120
2121 static always_inline uint32_t efsctuiz (uint32_t val)
2122 {
2123 CPU_FloatU u;
2124
2125 u.l = val;
2126 /* NaNs are not treated the way IEEE 754 specifies */
2127 if (unlikely(isnan(u.f)))
2128 return 0;
2129
2130 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2131 }
2132
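/* Fractional-format conversions: the 32-bit input of efscfsf/efscfuf is a
 * fixed-point fraction, so the converted value is scaled down by 2^32; the
 * efsct[su]f conversions below scale up by 2^32 before converting back. */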
2133 static always_inline uint32_t efscfsf (uint32_t val)
2134 {
2135 CPU_FloatU u;
2136 float32 tmp;
2137
2138 u.f = int32_to_float32(val, &env->spe_status);
2139 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2140 u.f = float32_div(u.f, tmp, &env->spe_status);
2141
2142 return u.l;
2143 }
2144
2145 static always_inline uint32_t efscfuf (uint32_t val)
2146 {
2147 CPU_FloatU u;
2148 float32 tmp;
2149
2150 u.f = uint32_to_float32(val, &env->spe_status);
2151 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2152 u.f = float32_div(u.f, tmp, &env->spe_status);
2153
2154 return u.l;
2155 }
2156
2157 static always_inline uint32_t efsctsf (uint32_t val)
2158 {
2159 CPU_FloatU u;
2160 float32 tmp;
2161
2162 u.l = val;
2163 /* NaNs are not treated the way IEEE 754 specifies */
2164 if (unlikely(isnan(u.f)))
2165 return 0;
2166 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2167 u.f = float32_mul(u.f, tmp, &env->spe_status);
2168
2169 return float32_to_int32(u.f, &env->spe_status);
2170 }
2171
2172 static always_inline uint32_t efsctuf (uint32_t val)
2173 {
2174 CPU_FloatU u;
2175 float32 tmp;
2176
2177 u.l = val;
2178 /* NaNs are not treated the way IEEE 754 specifies */
2179 if (unlikely(isnan(u.f)))
2180 return 0;
2181 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2182 u.f = float32_mul(u.f, tmp, &env->spe_status);
2183
2184 return float32_to_uint32(u.f, &env->spe_status);
2185 }
2186
2187 #define HELPER_SPE_SINGLE_CONV(name) \
2188 uint32_t helper_e##name (uint32_t val) \
2189 { \
2190 return e##name(val); \
2191 }
2192 /* efscfsi */
2193 HELPER_SPE_SINGLE_CONV(fscfsi);
2194 /* efscfui */
2195 HELPER_SPE_SINGLE_CONV(fscfui);
2196 /* efscfuf */
2197 HELPER_SPE_SINGLE_CONV(fscfuf);
2198 /* efscfsf */
2199 HELPER_SPE_SINGLE_CONV(fscfsf);
2200 /* efsctsi */
2201 HELPER_SPE_SINGLE_CONV(fsctsi);
2202 /* efsctui */
2203 HELPER_SPE_SINGLE_CONV(fsctui);
2204 /* efsctsiz */
2205 HELPER_SPE_SINGLE_CONV(fsctsiz);
2206 /* efsctuiz */
2207 HELPER_SPE_SINGLE_CONV(fsctuiz);
2208 /* efsctsf */
2209 HELPER_SPE_SINGLE_CONV(fsctsf);
2210 /* efsctuf */
2211 HELPER_SPE_SINGLE_CONV(fsctuf);
2212
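/* The ev* vector forms apply the scalar conversion independently to the
 * upper and lower 32-bit halves of the 64-bit operand. */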
2213 #define HELPER_SPE_VECTOR_CONV(name) \
2214 uint64_t helper_ev##name (uint64_t val) \
2215 { \
2216 return ((uint64_t)e##name(val >> 32) << 32) | \
2217 (uint64_t)e##name(val); \
2218 }
2219 /* evfscfsi */
2220 HELPER_SPE_VECTOR_CONV(fscfsi);
2221 /* evfscfui */
2222 HELPER_SPE_VECTOR_CONV(fscfui);
2223 /* evfscfuf */
2224 HELPER_SPE_VECTOR_CONV(fscfuf);
2225 /* evfscfsf */
2226 HELPER_SPE_VECTOR_CONV(fscfsf);
2227 /* evfsctsi */
2228 HELPER_SPE_VECTOR_CONV(fsctsi);
2229 /* evfsctui */
2230 HELPER_SPE_VECTOR_CONV(fsctui);
2231 /* evfsctsiz */
2232 HELPER_SPE_VECTOR_CONV(fsctsiz);
2233 /* evfsctuiz */
2234 HELPER_SPE_VECTOR_CONV(fsctuiz);
2235 /* evfsctsf */
2236 HELPER_SPE_VECTOR_CONV(fsctsf);
2237 /* evfsctuf */
2238 HELPER_SPE_VECTOR_CONV(fsctuf);
2239
2240 /* Single-precision floating-point arithmetic */
2241 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2242 {
2243 CPU_FloatU u1, u2;
2244 u1.l = op1;
2245 u2.l = op2;
2246 u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2247 return u1.l;
2248 }
2249
2250 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2251 {
2252 CPU_FloatU u1, u2;
2253 u1.l = op1;
2254 u2.l = op2;
2255 u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2256 return u1.l;
2257 }
2258
2259 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2260 {
2261 CPU_FloatU u1, u2;
2262 u1.l = op1;
2263 u2.l = op2;
2264 u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2265 return u1.l;
2266 }
2267
2268 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2269 {
2270 CPU_FloatU u1, u2;
2271 u1.l = op1;
2272 u2.l = op2;
2273 u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2274 return u1.l;
2275 }
2276
2277 #define HELPER_SPE_SINGLE_ARITH(name) \
2278 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2279 { \
2280 return e##name(op1, op2); \
2281 }
2282 /* efsadd */
2283 HELPER_SPE_SINGLE_ARITH(fsadd);
2284 /* efssub */
2285 HELPER_SPE_SINGLE_ARITH(fssub);
2286 /* efsmul */
2287 HELPER_SPE_SINGLE_ARITH(fsmul);
2288 /* efsdiv */
2289 HELPER_SPE_SINGLE_ARITH(fsdiv);
2290
2291 #define HELPER_SPE_VECTOR_ARITH(name) \
2292 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
2293 { \
2294 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
2295 (uint64_t)e##name(op1, op2); \
2296 }
2297 /* evfsadd */
2298 HELPER_SPE_VECTOR_ARITH(fsadd);
2299 /* evfssub */
2300 HELPER_SPE_VECTOR_ARITH(fssub);
2301 /* evfsmul */
2302 HELPER_SPE_VECTOR_ARITH(fsmul);
2303 /* evfsdiv */
2304 HELPER_SPE_VECTOR_ARITH(fsdiv);
2305
2306 /* Single-precision floating-point comparisons */
2307 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2308 {
2309 CPU_FloatU u1, u2;
2310 u1.l = op1;
2311 u2.l = op2;
2312 return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2313 }
2314
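/* Note: 'greater than' is derived by negating float32_le, so unordered (NaN)
 * operands are reported as greater than here; see the XXX notes below. */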
2315 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2316 {
2317 CPU_FloatU u1, u2;
2318 u1.l = op1;
2319 u2.l = op2;
2320 return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2321 }
2322
2323 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2324 {
2325 CPU_FloatU u1, u2;
2326 u1.l = op1;
2327 u2.l = op2;
2328 return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2329 }
2330
2331 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2332 {
2333 /* XXX: TODO: test special values (NaN, infinities, ...) */
2334 return efststlt(op1, op2);
2335 }
2336
2337 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2338 {
2339 /* XXX: TODO: test special values (NaN, infinities, ...) */
2340 return efststgt(op1, op2);
2341 }
2342
2343 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2344 {
2345 /* XXX: TODO: test special values (NaN, infinities, ...) */
2346 return efststeq(op1, op2);
2347 }
2348
2349 #define HELPER_SINGLE_SPE_CMP(name) \
2350 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2351 { \
2352 return e##name(op1, op2) << 2; \
2353 }
2354 /* efststlt */
2355 HELPER_SINGLE_SPE_CMP(fststlt);
2356 /* efststgt */
2357 HELPER_SINGLE_SPE_CMP(fststgt);
2358 /* efststeq */
2359 HELPER_SINGLE_SPE_CMP(fststeq);
2360 /* efscmplt */
2361 HELPER_SINGLE_SPE_CMP(fscmplt);
2362 /* efscmpgt */
2363 HELPER_SINGLE_SPE_CMP(fscmpgt);
2364 /* efscmpeq */
2365 HELPER_SINGLE_SPE_CMP(fscmpeq);
2366
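/* Merge the per-element compare results into a 4-bit CR field: bit 3 is the
 * high-element result, bit 2 the low-element result, bit 1 their OR and
 * bit 0 their AND (e.g. t0=1, t1=0 yields 0xA). */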
2367 static always_inline uint32_t evcmp_merge (int t0, int t1)
2368 {
2369 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2370 }
2371
2372 #define HELPER_VECTOR_SPE_CMP(name) \
2373 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
2374 { \
2375 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
2376 }
2377 /* evfststlt */
2378 HELPER_VECTOR_SPE_CMP(fststlt);
2379 /* evfststgt */
2380 HELPER_VECTOR_SPE_CMP(fststgt);
2381 /* evfststeq */
2382 HELPER_VECTOR_SPE_CMP(fststeq);
2383 /* evfscmplt */
2384 HELPER_VECTOR_SPE_CMP(fscmplt);
2385 /* evfscmpgt */
2386 HELPER_VECTOR_SPE_CMP(fscmpgt);
2387 /* evfscmpeq */
2388 HELPER_VECTOR_SPE_CMP(fscmpeq);
2389
2390 /* Double-precision floating-point conversion */
2391 uint64_t helper_efdcfsi (uint32_t val)
2392 {
2393 CPU_DoubleU u;
2394
2395 u.d = int32_to_float64(val, &env->spe_status);
2396
2397 return u.ll;
2398 }
2399
2400 uint64_t helper_efdcfsid (uint64_t val)
2401 {
2402 CPU_DoubleU u;
2403
2404 u.d = int64_to_float64(val, &env->spe_status);
2405
2406 return u.ll;
2407 }
2408
2409 uint64_t helper_efdcfui (uint32_t val)
2410 {
2411 CPU_DoubleU u;
2412
2413 u.d = uint32_to_float64(val, &env->spe_status);
2414
2415 return u.ll;
2416 }
2417
2418 uint64_t helper_efdcfuid (uint64_t val)
2419 {
2420 CPU_DoubleU u;
2421
2422 u.d = uint64_to_float64(val, &env->spe_status);
2423
2424 return u.ll;
2425 }
2426
2427 uint32_t helper_efdctsi (uint64_t val)
2428 {
2429 CPU_DoubleU u;
2430
2431 u.ll = val;
2432 /* NaNs are not treated the way IEEE 754 specifies */
2433 if (unlikely(isnan(u.d)))
2434 return 0;
2435
2436 return float64_to_int32(u.d, &env->spe_status);
2437 }
2438
2439 uint32_t helper_efdctui (uint64_t val)
2440 {
2441 CPU_DoubleU u;
2442
2443 u.ll = val;
2444 /* NaNs are not treated the way IEEE 754 specifies */
2445 if (unlikely(isnan(u.d)))
2446 return 0;
2447
2448 return float64_to_uint32(u.d, &env->spe_status);
2449 }
2450
2451 uint32_t helper_efdctsiz (uint64_t val)
2452 {
2453 CPU_DoubleU u;
2454
2455 u.ll = val;
2456 /* NaNs are not treated the way IEEE 754 specifies */
2457 if (unlikely(isnan(u.d)))
2458 return 0;
2459
2460 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2461 }
2462
2463 uint64_t helper_efdctsidz (uint64_t val)
2464 {
2465 CPU_DoubleU u;
2466
2467 u.ll = val;
2468 /* NaNs are not treated the way IEEE 754 specifies */
2469 if (unlikely(isnan(u.d)))
2470 return 0;
2471
2472 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2473 }
2474
2475 uint32_t helper_efdctuiz (uint64_t val)
2476 {
2477 CPU_DoubleU u;
2478
2479 u.ll = val;
2480 /* NaNs are not treated the way IEEE 754 specifies */
2481 if (unlikely(isnan(u.d)))
2482 return 0;
2483
2484 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2485 }
2486
2487 uint64_t helper_efdctuidz (uint64_t val)
2488 {
2489 CPU_DoubleU u;
2490
2491 u.ll = val;
2492 /* NaNs are not treated the way IEEE 754 specifies */
2493 if (unlikely(isnan(u.d)))
2494 return 0;
2495
2496 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2497 }
2498
2499 uint64_t helper_efdcfsf (uint32_t val)
2500 {
2501 CPU_DoubleU u;
2502 float64 tmp;
2503
2504 u.d = int32_to_float64(val, &env->spe_status);
2505 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2506 u.d = float64_div(u.d, tmp, &env->spe_status);
2507
2508 return u.ll;
2509 }
2510
2511 uint64_t helper_efdcfuf (uint32_t val)
2512 {
2513 CPU_DoubleU u;
2514 float64 tmp;
2515
2516 u.d = uint32_to_float64(val, &env->spe_status);
2517 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2518 u.d = float64_div(u.d, tmp, &env->spe_status);
2519
2520 return u.ll;
2521 }
2522
2523 uint32_t helper_efdctsf (uint64_t val)
2524 {
2525 CPU_DoubleU u;
2526 float64 tmp;
2527
2528 u.ll = val;
2529 /* NaNs are not treated the way IEEE 754 specifies */
2530 if (unlikely(isnan(u.d)))
2531 return 0;
2532 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2533 u.d = float64_mul(u.d, tmp, &env->spe_status);
2534
2535 return float64_to_int32(u.d, &env->spe_status);
2536 }
2537
2538 uint32_t helper_efdctuf (uint64_t val)
2539 {
2540 CPU_DoubleU u;
2541 float64 tmp;
2542
2543 u.ll = val;
2544 /* NaNs are not treated the way IEEE 754 specifies */
2545 if (unlikely(isnan(u.d)))
2546 return 0;
2547 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2548 u.d = float64_mul(u.d, tmp, &env->spe_status);
2549
2550 return float64_to_uint32(u.d, &env->spe_status);
2551 }
2552
2553 uint32_t helper_efscfd (uint64_t val)
2554 {
2555 CPU_DoubleU u1;
2556 CPU_FloatU u2;
2557
2558 u1.ll = val;
2559 u2.f = float64_to_float32(u1.d, &env->spe_status);
2560
2561 return u2.l;
2562 }
2563
2564 uint64_t helper_efdcfs (uint32_t val)
2565 {
2566 CPU_DoubleU u2;
2567 CPU_FloatU u1;
2568
2569 u1.l = val;
2570 u2.d = float32_to_float64(u1.f, &env->spe_status);
2571
2572 return u2.ll;
2573 }
2574
2575 /* Double-precision floating-point arithmetic */
2576 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2577 {
2578 CPU_DoubleU u1, u2;
2579 u1.ll = op1;
2580 u2.ll = op2;
2581 u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2582 return u1.ll;
2583 }
2584
2585 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2586 {
2587 CPU_DoubleU u1, u2;
2588 u1.ll = op1;
2589 u2.ll = op2;
2590 u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2591 return u1.ll;
2592 }
2593
2594 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2595 {
2596 CPU_DoubleU u1, u2;
2597 u1.ll = op1;
2598 u2.ll = op2;
2599 u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2600 return u1.ll;
2601 }
2602
2603 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2604 {
2605 CPU_DoubleU u1, u2;
2606 u1.ll = op1;
2607 u2.ll = op2;
2608 u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2609 return u1.ll;
2610 }
2611
2612 /* Double-precision floating-point comparisons */
2613 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2614 {
2615 CPU_DoubleU u1, u2;
2616 u1.ll = op1;
2617 u2.ll = op2;
2618 return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2619 }
2620
2621 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2622 {
2623 CPU_DoubleU u1, u2;
2624 u1.ll = op1;
2625 u2.ll = op2;
2626 return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2627 }
2628
2629 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2630 {
2631 CPU_DoubleU u1, u2;
2632 u1.ll = op1;
2633 u2.ll = op2;
2634 return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2635 }
2636
2637 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2638 {
2639 /* XXX: TODO: test special values (NaN, infinities, ...) */
2640 return helper_efdtstlt(op1, op2);
2641 }
2642
2643 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2644 {
2645 /* XXX: TODO: test special values (NaN, infinities, ...) */
2646 return helper_efdtstgt(op1, op2);
2647 }
2648
2649 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2650 {
2651 /* XXX: TODO: test special values (NaN, infinities, ...) */
2652 return helper_efdtsteq(op1, op2);
2653 }
2654
2655 /*****************************************************************************/
2656 /* Softmmu support */
2657 #if !defined (CONFIG_USER_ONLY)
2658
2659 #define MMUSUFFIX _mmu
2660
2661 #define SHIFT 0
2662 #include "softmmu_template.h"
2663
2664 #define SHIFT 1
2665 #include "softmmu_template.h"
2666
2667 #define SHIFT 2
2668 #include "softmmu_template.h"
2669
2670 #define SHIFT 3
2671 #include "softmmu_template.h"
2672
2673 /* Try to fill the TLB and raise an exception on error. If retaddr is
2674    NULL, the function was called from C code (i.e. not from generated
2675    code or from helper.c) */
2676 /* XXX: fix it to restore all registers */
2677 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2678 {
2679 TranslationBlock *tb;
2680 CPUState *saved_env;
2681 unsigned long pc;
2682 int ret;
2683
2684 /* XXX: hack to restore env in all cases, even if not called from
2685 generated code */
2686 saved_env = env;
2687 env = cpu_single_env;
2688 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2689 if (unlikely(ret != 0)) {
2690 if (likely(retaddr)) {
2691 /* now we have a real cpu fault */
2692 pc = (unsigned long)retaddr;
2693 tb = tb_find_pc(pc);
2694 if (likely(tb)) {
2695 /* the PC is inside the translated code, which means we have
2696    a virtual CPU fault */
2697 cpu_restore_state(tb, env, pc, NULL);
2698 }
2699 }
2700 raise_exception_err(env, env->exception_index, env->error_code);
2701 }
2702 env = saved_env;
2703 }
2704
2705 /* Software-driven TLB management */
2706 /* PowerPC 602/603 software TLB load instruction helpers */
2707 static void helper_load_6xx_tlb (target_ulong new_EPN, int is_code)
2708 {
2709 target_ulong RPN, CMP, EPN;
2710 int way;
2711
2712 RPN = env->spr[SPR_RPA];
2713 if (is_code) {
2714 CMP = env->spr[SPR_ICMP];
2715 EPN = env->spr[SPR_IMISS];
2716 } else {
2717 CMP = env->spr[SPR_DCMP];
2718 EPN = env->spr[SPR_DMISS];
2719 }
2720 way = (env->spr[SPR_SRR1] >> 17) & 1;
2721 #if defined (DEBUG_SOFTWARE_TLB)
2722 if (loglevel != 0) {
2723 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2724 " PTE1 " ADDRX " way %d\n",
2725 __func__, T0, EPN, CMP, RPN, way);
2726 }
2727 #endif
2728 /* Store this TLB */
2729 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2730 way, is_code, CMP, RPN);
2731 }
2732
2733 void helper_load_6xx_tlbd (target_ulong EPN)
2734 {
2735 helper_load_6xx_tlb(EPN, 0);
2736 }
2737
2738 void helper_load_6xx_tlbi (target_ulong EPN)
2739 {
2740 helper_load_6xx_tlb(EPN, 1);
2741 }
2742
2743 /* PowerPC 74xx software TLB load instruction helpers */
2744 static void helper_load_74xx_tlb (target_ulong new_EPN, int is_code)
2745 {
2746 target_ulong RPN, CMP, EPN;
2747 int way;
2748
2749 RPN = env->spr[SPR_PTELO];
2750 CMP = env->spr[SPR_PTEHI];
2751 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2752 way = env->spr[SPR_TLBMISS] & 0x3;
2753 #if defined (DEBUG_SOFTWARE_TLB)
2754 if (loglevel != 0) {
2755 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2756 " PTE1 " ADDRX " way %d\n",
2757 __func__, T0, EPN, CMP, RPN, way);
2758 }
2759 #endif
2760 /* Store this TLB */
2761 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2762 way, is_code, CMP, RPN);
2763 }
2764
2765 void helper_load_74xx_tlbd (target_ulong EPN)
2766 {
2767 helper_load_74xx_tlb(EPN, 0);
2768 }
2769
2770 void helper_load_74xx_tlbi (target_ulong EPN)
2771 {
2772 helper_load_74xx_tlb(EPN, 1);
2773 }
2774
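/* The BookE TLB SIZE field encodes the page size as 1 KiB * 4^SIZE
 * (0 -> 1 KiB, 4 -> 256 KiB, 7 -> 16 MiB, ...); booke_page_size_to_tlb is the
 * inverse mapping and returns -1 for unsupported sizes. */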
2775 static always_inline target_ulong booke_tlb_to_page_size (int size)
2776 {
2777 return 1024 << (2 * size);
2778 }
2779
2780 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2781 {
2782 int size;
2783
2784 switch (page_size) {
2785 case 0x00000400UL:
2786 size = 0x0;
2787 break;
2788 case 0x00001000UL:
2789 size = 0x1;
2790 break;
2791 case 0x00004000UL:
2792 size = 0x2;
2793 break;
2794 case 0x00010000UL:
2795 size = 0x3;
2796 break;
2797 case 0x00040000UL:
2798 size = 0x4;
2799 break;
2800 case 0x00100000UL:
2801 size = 0x5;
2802 break;
2803 case 0x00400000UL:
2804 size = 0x6;
2805 break;
2806 case 0x01000000UL:
2807 size = 0x7;
2808 break;
2809 case 0x04000000UL:
2810 size = 0x8;
2811 break;
2812 case 0x10000000UL:
2813 size = 0x9;
2814 break;
2815 case 0x40000000UL:
2816 size = 0xA;
2817 break;
2818 #if defined (TARGET_PPC64)
2819 case 0x000100000000ULL:
2820 size = 0xB;
2821 break;
2822 case 0x000400000000ULL:
2823 size = 0xC;
2824 break;
2825 case 0x001000000000ULL:
2826 size = 0xD;
2827 break;
2828 case 0x004000000000ULL:
2829 size = 0xE;
2830 break;
2831 case 0x010000000000ULL:
2832 size = 0xF;
2833 break;
2834 #endif
2835 default:
2836 size = -1;
2837 break;
2838 }
2839
2840 return size;
2841 }
2842
2843 /* Helpers for 4xx TLB management */
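/* tlbre returns one half of the selected entry: do_4xx_tlbre_lo yields the
 * EPN/size/valid word and loads the entry's PID into SPR_40x_PID, while
 * do_4xx_tlbre_hi yields the RPN plus the execute/write permission bits. */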
2844 void do_4xx_tlbre_lo (void)
2845 {
2846 ppcemb_tlb_t *tlb;
2847 int size;
2848
2849 T0 &= 0x3F;
2850 tlb = &env->tlb[T0].tlbe;
2851 T0 = tlb->EPN;
2852 if (tlb->prot & PAGE_VALID)
2853 T0 |= 0x400;
2854 size = booke_page_size_to_tlb(tlb->size);
2855 if (size < 0 || size > 0x7)
2856 size = 1;
2857 T0 |= size << 7;
2858 env->spr[SPR_40x_PID] = tlb->PID;
2859 }
2860
2861 void do_4xx_tlbre_hi (void)
2862 {
2863 ppcemb_tlb_t *tlb;
2864
2865 T0 &= 0x3F;
2866 tlb = &env->tlb[T0].tlbe;
2867 T0 = tlb->RPN;
2868 if (tlb->prot & PAGE_EXEC)
2869 T0 |= 0x200;
2870 if (tlb->prot & PAGE_WRITE)
2871 T0 |= 0x100;
2872 }
2873
2874 void do_4xx_tlbwe_hi (void)
2875 {
2876 ppcemb_tlb_t *tlb;
2877 target_ulong page, end;
2878
2879 #if defined (DEBUG_SOFTWARE_TLB)
2880 if (loglevel != 0) {
2881 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2882 }
2883 #endif
2884 T0 &= 0x3F;
2885 tlb = &env->tlb[T0].tlbe;
2886 /* Invalidate previous TLB (if it's valid) */
2887 if (tlb->prot & PAGE_VALID) {
2888 end = tlb->EPN + tlb->size;
2889 #if defined (DEBUG_SOFTWARE_TLB)
2890 if (loglevel != 0) {
2891 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2892 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2893 }
2894 #endif
2895 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2896 tlb_flush_page(env, page);
2897 }
2898 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2899 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2900 * If this ever occurs, one should use the ppcemb target instead
2901 * of the ppc or ppc64 one
2902 */
2903 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2904 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2905 "are not supported (%d)\n",
2906 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2907 }
2908 tlb->EPN = T1 & ~(tlb->size - 1);
2909 if (T1 & 0x40)
2910 tlb->prot |= PAGE_VALID;
2911 else
2912 tlb->prot &= ~PAGE_VALID;
2913 if (T1 & 0x20) {
2914 /* XXX: TO BE FIXED */
2915 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
2916 }
2917 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2918 tlb->attr = T1 & 0xFF;
2919 #if defined (DEBUG_SOFTWARE_TLB)
2920 if (loglevel != 0) {
2921 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2922 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2923 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2924 tlb->prot & PAGE_READ ? 'r' : '-',
2925 tlb->prot & PAGE_WRITE ? 'w' : '-',
2926 tlb->prot & PAGE_EXEC ? 'x' : '-',
2927 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2928 }
2929 #endif
2930 /* Invalidate new TLB (if valid) */
2931 if (tlb->prot & PAGE_VALID) {
2932 end = tlb->EPN + tlb->size;
2933 #if defined (DEBUG_SOFTWARE_TLB)
2934 if (loglevel != 0) {
2935 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2936 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2937 }
2938 #endif
2939 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2940 tlb_flush_page(env, page);
2941 }
2942 }
2943
2944 void do_4xx_tlbwe_lo (void)
2945 {
2946 ppcemb_tlb_t *tlb;
2947
2948 #if defined (DEBUG_SOFTWARE_TLB)
2949 if (loglevel != 0) {
2950 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2951 }
2952 #endif
2953 T0 &= 0x3F;
2954 tlb = &env->tlb[T0].tlbe;
2955 tlb->RPN = T1 & 0xFFFFFC00;
2956 tlb->prot = PAGE_READ;
2957 if (T1 & 0x200)
2958 tlb->prot |= PAGE_EXEC;
2959 if (T1 & 0x100)
2960 tlb->prot |= PAGE_WRITE;
2961 #if defined (DEBUG_SOFTWARE_TLB)
2962 if (loglevel != 0) {
2963 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2964 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2965 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2966 tlb->prot & PAGE_READ ? 'r' : '-',
2967 tlb->prot & PAGE_WRITE ? 'w' : '-',
2968 tlb->prot & PAGE_EXEC ? 'x' : '-',
2969 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2970 }
2971 #endif
2972 }
2973
2974 /* PowerPC 440 TLB management */
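/* A 440 TLB entry is accessed one word at a time: word 0 carries the EPN,
 * page size and valid bit, word 1 the RPN, and word 2 the storage attributes
 * plus the two sets of access permissions (tracked here via the plain and
 * shifted PAGE_* bits). */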
2975 void do_440_tlbwe (int word)
2976 {
2977 ppcemb_tlb_t *tlb;
2978 target_ulong EPN, RPN, size;
2979 int do_flush_tlbs;
2980
2981 #if defined (DEBUG_SOFTWARE_TLB)
2982 if (loglevel != 0) {
2983 fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
2984 __func__, word, T0, T1);
2985 }
2986 #endif
2987 do_flush_tlbs = 0;
2988 T0 &= 0x3F;
2989 tlb = &env->tlb[T0].tlbe;
2990 switch (word) {
2991 default:
2992 /* Just here to please gcc */
2993 case 0:
2994 EPN = T1 & 0xFFFFFC00;
2995 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2996 do_flush_tlbs = 1;
2997 tlb->EPN = EPN;
2998 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
2999 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
3000 do_flush_tlbs = 1;
3001 tlb->size = size;
3002 tlb->attr &= ~0x1;
3003 tlb->attr |= (T1 >> 8) & 1;
3004 if (T1 & 0x200) {
3005 tlb->prot |= PAGE_VALID;
3006 } else {
3007 if (tlb->prot & PAGE_VALID) {
3008 tlb->prot &= ~PAGE_VALID;
3009 do_flush_tlbs = 1;
3010 }
3011 }
3012 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
3013 if (do_flush_tlbs)
3014 tlb_flush(env, 1);
3015 break;
3016 case 1:
3017 RPN = T1 & 0xFFFFFC0F;
3018 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
3019 tlb_flush(env, 1);
3020 tlb->RPN = RPN;
3021 break;
3022 case 2:
3023 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
3024 tlb->prot = tlb->prot & PAGE_VALID;
3025 if (T1 & 0x1)
3026 tlb->prot |= PAGE_READ << 4;
3027 if (T1 & 0x2)
3028 tlb->prot |= PAGE_WRITE << 4;
3029 if (T1 & 0x4)
3030 tlb->prot |= PAGE_EXEC << 4;
3031 if (T1 & 0x8)
3032 tlb->prot |= PAGE_READ;
3033 if (T1 & 0x10)
3034 tlb->prot |= PAGE_WRITE;
3035 if (T1 & 0x20)
3036 tlb->prot |= PAGE_EXEC;
3037 break;
3038 }
3039 }
3040
3041 void do_440_tlbre (int word)
3042 {
3043 ppcemb_tlb_t *tlb;
3044 int size;
3045
3046 T0 &= 0x3F;
3047 tlb = &env->tlb[T0].tlbe;
3048 switch (word) {
3049 default:
3050 /* Just here to please gcc */
3051 case 0:
3052 T0 = tlb->EPN;
3053 size = booke_page_size_to_tlb(tlb->size);
3054 if (size < 0 || size > 0xF)
3055 size = 1;
3056 T0 |= size << 4;
3057 if (tlb->attr & 0x1)
3058 T0 |= 0x100;
3059 if (tlb->prot & PAGE_VALID)
3060 T0 |= 0x200;
3061 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
3062 env->spr[SPR_440_MMUCR] |= tlb->PID;
3063 break;
3064 case 1:
3065 T0 = tlb->RPN;
3066 break;
3067 case 2:
3068 T0 = tlb->attr & ~0x1;
3069 if (tlb->prot & (PAGE_READ << 4))
3070 T0 |= 0x1;
3071 if (tlb->prot & (PAGE_WRITE << 4))
3072 T0 |= 0x2;
3073 if (tlb->prot & (PAGE_EXEC << 4))
3074 T0 |= 0x4;
3075 if (tlb->prot & PAGE_READ)
3076 T0 |= 0x8;
3077 if (tlb->prot & PAGE_WRITE)
3078 T0 |= 0x10;
3079 if (tlb->prot & PAGE_EXEC)
3080 T0 |= 0x20;
3081 break;
3082 }
3083 }
3084 #endif /* !CONFIG_USER_ONLY */