]> git.proxmox.com Git - qemu.git/blob - target-ppc/op_helper.c
target-ppc: convert SLB/TLB instructions to TCG
[qemu.git] / target-ppc / op_helper.c
1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21 #include "host-utils.h"
22 #include "helper.h"
23
24 #include "helper_regs.h"
25 #include "op_helper.h"
26
27 //#define DEBUG_OP
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
34 void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
35 {
36 raise_exception_err(env, exception, error_code);
37 }
38
39 void helper_raise_debug (void)
40 {
41 raise_exception(env, EXCP_DEBUG);
42 }
43
44 /*****************************************************************************/
45 /* Registers load and stores */
46 target_ulong helper_load_cr (void)
47 {
48 return (env->crf[0] << 28) |
49 (env->crf[1] << 24) |
50 (env->crf[2] << 20) |
51 (env->crf[3] << 16) |
52 (env->crf[4] << 12) |
53 (env->crf[5] << 8) |
54 (env->crf[6] << 4) |
55 (env->crf[7] << 0);
56 }
57
58 void helper_store_cr (target_ulong val, uint32_t mask)
59 {
60 int i, sh;
61
62 for (i = 0, sh = 7; i < 8; i++, sh--) {
63 if (mask & (1 << sh))
64 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
65 }
66 }
67
68 #if defined(TARGET_PPC64)
69 void do_store_pri (int prio)
70 {
71 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
72 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
73 }
74 #endif
75
76 target_ulong ppc_load_dump_spr (int sprn)
77 {
78 if (loglevel != 0) {
79 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
80 sprn, sprn, env->spr[sprn]);
81 }
82
83 return env->spr[sprn];
84 }
85
86 void ppc_store_dump_spr (int sprn, target_ulong val)
87 {
88 if (loglevel != 0) {
89 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
90 sprn, sprn, env->spr[sprn], val);
91 }
92 env->spr[sprn] = val;
93 }
94
95 /*****************************************************************************/
96 /* Memory load and stores */
97
98 static always_inline target_ulong get_addr(target_ulong addr)
99 {
100 #if defined(TARGET_PPC64)
101 if (msr_sf)
102 return addr;
103 else
104 #endif
105 return (uint32_t)addr;
106 }
107
108 void helper_lmw (target_ulong addr, uint32_t reg)
109 {
110 for (; reg < 32; reg++, addr += 4) {
111 if (msr_le)
112 env->gpr[reg] = bswap32(ldl(get_addr(addr)));
113 else
114 env->gpr[reg] = ldl(get_addr(addr));
115 }
116 }
117
118 void helper_stmw (target_ulong addr, uint32_t reg)
119 {
120 for (; reg < 32; reg++, addr += 4) {
121 if (msr_le)
122 stl(get_addr(addr), bswap32((uint32_t)env->gpr[reg]));
123 else
124 stl(get_addr(addr), (uint32_t)env->gpr[reg]);
125 }
126 }
127
128 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
129 {
130 int sh;
131 for (; nb > 3; nb -= 4, addr += 4) {
132 env->gpr[reg] = ldl(get_addr(addr));
133 reg = (reg + 1) % 32;
134 }
135 if (unlikely(nb > 0)) {
136 env->gpr[reg] = 0;
137 for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
138 env->gpr[reg] |= ldub(get_addr(addr)) << sh;
139 }
140 }
141 }
142 /* PPC32 specification says we must generate an exception if
143 * rA is in the range of registers to be loaded.
144 * In an other hand, IBM says this is valid, but rA won't be loaded.
145 * For now, I'll follow the spec...
146 */
147 void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
148 {
149 if (likely(xer_bc != 0)) {
150 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
151 (reg < rb && (reg + xer_bc) > rb))) {
152 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
153 POWERPC_EXCP_INVAL |
154 POWERPC_EXCP_INVAL_LSWX);
155 } else {
156 helper_lsw(addr, xer_bc, reg);
157 }
158 }
159 }
160
161 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
162 {
163 int sh;
164 for (; nb > 3; nb -= 4, addr += 4) {
165 stl(get_addr(addr), env->gpr[reg]);
166 reg = (reg + 1) % 32;
167 }
168 if (unlikely(nb > 0)) {
169 for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
170 stb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
171 }
172 }
173
174 static void do_dcbz(target_ulong addr, int dcache_line_size)
175 {
176 target_long mask = get_addr(~(dcache_line_size - 1));
177 int i;
178 addr &= mask;
179 for (i = 0 ; i < dcache_line_size ; i += 4) {
180 stl(addr + i , 0);
181 }
182 if ((env->reserve & mask) == addr)
183 env->reserve = (target_ulong)-1ULL;
184 }
185
186 void helper_dcbz(target_ulong addr)
187 {
188 do_dcbz(addr, env->dcache_line_size);
189 }
190
191 void helper_dcbz_970(target_ulong addr)
192 {
193 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
194 do_dcbz(addr, 32);
195 else
196 do_dcbz(addr, env->dcache_line_size);
197 }
198
199 void helper_icbi(target_ulong addr)
200 {
201 uint32_t tmp;
202
203 addr = get_addr(addr & ~(env->dcache_line_size - 1));
204 /* Invalidate one cache line :
205 * PowerPC specification says this is to be treated like a load
206 * (not a fetch) by the MMU. To be sure it will be so,
207 * do the load "by hand".
208 */
209 tmp = ldl(addr);
210 tb_invalidate_page_range(addr, addr + env->icache_line_size);
211 }
212
213 // XXX: to be tested
214 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
215 {
216 int i, c, d;
217 d = 24;
218 for (i = 0; i < xer_bc; i++) {
219 c = ldub((uint32_t)addr++);
220 /* ra (if not 0) and rb are never modified */
221 if (likely(reg != rb && (ra == 0 || reg != ra))) {
222 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
223 }
224 if (unlikely(c == xer_cmp))
225 break;
226 if (likely(d != 0)) {
227 d -= 8;
228 } else {
229 d = 24;
230 reg++;
231 reg = reg & 0x1F;
232 }
233 }
234 return i;
235 }
236
237 /*****************************************************************************/
238 /* Fixed point operations helpers */
239 #if defined(TARGET_PPC64)
240
241 /* multiply high word */
242 uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
243 {
244 uint64_t tl, th;
245
246 muls64(&tl, &th, arg1, arg2);
247 return th;
248 }
249
250 /* multiply high word unsigned */
251 uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
252 {
253 uint64_t tl, th;
254
255 mulu64(&tl, &th, arg1, arg2);
256 return th;
257 }
258
259 uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
260 {
261 int64_t th;
262 uint64_t tl;
263
264 muls64(&tl, (uint64_t *)&th, arg1, arg2);
265 /* If th != 0 && th != -1, then we had an overflow */
266 if (likely((uint64_t)(th + 1) <= 1)) {
267 env->xer &= ~(1 << XER_OV);
268 } else {
269 env->xer |= (1 << XER_OV) | (1 << XER_SO);
270 }
271 return (int64_t)tl;
272 }
273 #endif
274
275 target_ulong helper_cntlzw (target_ulong t)
276 {
277 return clz32(t);
278 }
279
280 #if defined(TARGET_PPC64)
281 target_ulong helper_cntlzd (target_ulong t)
282 {
283 return clz64(t);
284 }
285 #endif
286
287 /* shift right arithmetic helper */
288 target_ulong helper_sraw (target_ulong value, target_ulong shift)
289 {
290 int32_t ret;
291
292 if (likely(!(shift & 0x20))) {
293 if (likely((uint32_t)shift != 0)) {
294 shift &= 0x1f;
295 ret = (int32_t)value >> shift;
296 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
297 env->xer &= ~(1 << XER_CA);
298 } else {
299 env->xer |= (1 << XER_CA);
300 }
301 } else {
302 ret = (int32_t)value;
303 env->xer &= ~(1 << XER_CA);
304 }
305 } else {
306 ret = (int32_t)value >> 31;
307 if (ret) {
308 env->xer |= (1 << XER_CA);
309 } else {
310 env->xer &= ~(1 << XER_CA);
311 }
312 }
313 return (target_long)ret;
314 }
315
316 #if defined(TARGET_PPC64)
317 target_ulong helper_srad (target_ulong value, target_ulong shift)
318 {
319 int64_t ret;
320
321 if (likely(!(shift & 0x40))) {
322 if (likely((uint64_t)shift != 0)) {
323 shift &= 0x3f;
324 ret = (int64_t)value >> shift;
325 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
326 env->xer &= ~(1 << XER_CA);
327 } else {
328 env->xer |= (1 << XER_CA);
329 }
330 } else {
331 ret = (int64_t)value;
332 env->xer &= ~(1 << XER_CA);
333 }
334 } else {
335 ret = (int64_t)value >> 63;
336 if (ret) {
337 env->xer |= (1 << XER_CA);
338 } else {
339 env->xer &= ~(1 << XER_CA);
340 }
341 }
342 return ret;
343 }
344 #endif
345
346 target_ulong helper_popcntb (target_ulong val)
347 {
348 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
349 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
350 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
351 return val;
352 }
353
354 #if defined(TARGET_PPC64)
355 target_ulong helper_popcntb_64 (target_ulong val)
356 {
357 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
358 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
359 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
360 return val;
361 }
362 #endif
363
364 /*****************************************************************************/
365 /* Floating point operations helpers */
366 uint64_t helper_float32_to_float64(uint32_t arg)
367 {
368 CPU_FloatU f;
369 CPU_DoubleU d;
370 f.l = arg;
371 d.d = float32_to_float64(f.f, &env->fp_status);
372 return d.ll;
373 }
374
375 uint32_t helper_float64_to_float32(uint64_t arg)
376 {
377 CPU_FloatU f;
378 CPU_DoubleU d;
379 d.ll = arg;
380 f.f = float64_to_float32(d.d, &env->fp_status);
381 return f.l;
382 }
383
384 static always_inline int fpisneg (float64 d)
385 {
386 CPU_DoubleU u;
387
388 u.d = d;
389
390 return u.ll >> 63 != 0;
391 }
392
393 static always_inline int isden (float64 d)
394 {
395 CPU_DoubleU u;
396
397 u.d = d;
398
399 return ((u.ll >> 52) & 0x7FF) == 0;
400 }
401
402 static always_inline int iszero (float64 d)
403 {
404 CPU_DoubleU u;
405
406 u.d = d;
407
408 return (u.ll & ~0x8000000000000000ULL) == 0;
409 }
410
411 static always_inline int isinfinity (float64 d)
412 {
413 CPU_DoubleU u;
414
415 u.d = d;
416
417 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
418 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
419 }
420
421 #ifdef CONFIG_SOFTFLOAT
422 static always_inline int isfinite (float64 d)
423 {
424 CPU_DoubleU u;
425
426 u.d = d;
427
428 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
429 }
430
431 static always_inline int isnormal (float64 d)
432 {
433 CPU_DoubleU u;
434
435 u.d = d;
436
437 uint32_t exp = (u.ll >> 52) & 0x7FF;
438 return ((0 < exp) && (exp < 0x7FF));
439 }
440 #endif
441
442 uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
443 {
444 CPU_DoubleU farg;
445 int isneg;
446 int ret;
447 farg.ll = arg;
448 isneg = fpisneg(farg.d);
449 if (unlikely(float64_is_nan(farg.d))) {
450 if (float64_is_signaling_nan(farg.d)) {
451 /* Signaling NaN: flags are undefined */
452 ret = 0x00;
453 } else {
454 /* Quiet NaN */
455 ret = 0x11;
456 }
457 } else if (unlikely(isinfinity(farg.d))) {
458 /* +/- infinity */
459 if (isneg)
460 ret = 0x09;
461 else
462 ret = 0x05;
463 } else {
464 if (iszero(farg.d)) {
465 /* +/- zero */
466 if (isneg)
467 ret = 0x12;
468 else
469 ret = 0x02;
470 } else {
471 if (isden(farg.d)) {
472 /* Denormalized numbers */
473 ret = 0x10;
474 } else {
475 /* Normalized numbers */
476 ret = 0x00;
477 }
478 if (isneg) {
479 ret |= 0x08;
480 } else {
481 ret |= 0x04;
482 }
483 }
484 }
485 if (set_fprf) {
486 /* We update FPSCR_FPRF */
487 env->fpscr &= ~(0x1F << FPSCR_FPRF);
488 env->fpscr |= ret << FPSCR_FPRF;
489 }
490 /* We just need fpcc to update Rc1 */
491 return ret & 0xF;
492 }
493
494 /* Floating-point invalid operations exception */
495 static always_inline uint64_t fload_invalid_op_excp (int op)
496 {
497 uint64_t ret = 0;
498 int ve;
499
500 ve = fpscr_ve;
501 if (op & POWERPC_EXCP_FP_VXSNAN) {
502 /* Operation on signaling NaN */
503 env->fpscr |= 1 << FPSCR_VXSNAN;
504 }
505 if (op & POWERPC_EXCP_FP_VXSOFT) {
506 /* Software-defined condition */
507 env->fpscr |= 1 << FPSCR_VXSOFT;
508 }
509 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
510 case POWERPC_EXCP_FP_VXISI:
511 /* Magnitude subtraction of infinities */
512 env->fpscr |= 1 << FPSCR_VXISI;
513 goto update_arith;
514 case POWERPC_EXCP_FP_VXIDI:
515 /* Division of infinity by infinity */
516 env->fpscr |= 1 << FPSCR_VXIDI;
517 goto update_arith;
518 case POWERPC_EXCP_FP_VXZDZ:
519 /* Division of zero by zero */
520 env->fpscr |= 1 << FPSCR_VXZDZ;
521 goto update_arith;
522 case POWERPC_EXCP_FP_VXIMZ:
523 /* Multiplication of zero by infinity */
524 env->fpscr |= 1 << FPSCR_VXIMZ;
525 goto update_arith;
526 case POWERPC_EXCP_FP_VXVC:
527 /* Ordered comparison of NaN */
528 env->fpscr |= 1 << FPSCR_VXVC;
529 env->fpscr &= ~(0xF << FPSCR_FPCC);
530 env->fpscr |= 0x11 << FPSCR_FPCC;
531 /* We must update the target FPR before raising the exception */
532 if (ve != 0) {
533 env->exception_index = POWERPC_EXCP_PROGRAM;
534 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
535 /* Update the floating-point enabled exception summary */
536 env->fpscr |= 1 << FPSCR_FEX;
537 /* Exception is differed */
538 ve = 0;
539 }
540 break;
541 case POWERPC_EXCP_FP_VXSQRT:
542 /* Square root of a negative number */
543 env->fpscr |= 1 << FPSCR_VXSQRT;
544 update_arith:
545 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
546 if (ve == 0) {
547 /* Set the result to quiet NaN */
548 ret = UINT64_MAX;
549 env->fpscr &= ~(0xF << FPSCR_FPCC);
550 env->fpscr |= 0x11 << FPSCR_FPCC;
551 }
552 break;
553 case POWERPC_EXCP_FP_VXCVI:
554 /* Invalid conversion */
555 env->fpscr |= 1 << FPSCR_VXCVI;
556 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
557 if (ve == 0) {
558 /* Set the result to quiet NaN */
559 ret = UINT64_MAX;
560 env->fpscr &= ~(0xF << FPSCR_FPCC);
561 env->fpscr |= 0x11 << FPSCR_FPCC;
562 }
563 break;
564 }
565 /* Update the floating-point invalid operation summary */
566 env->fpscr |= 1 << FPSCR_VX;
567 /* Update the floating-point exception summary */
568 env->fpscr |= 1 << FPSCR_FX;
569 if (ve != 0) {
570 /* Update the floating-point enabled exception summary */
571 env->fpscr |= 1 << FPSCR_FEX;
572 if (msr_fe0 != 0 || msr_fe1 != 0)
573 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
574 }
575 return ret;
576 }
577
578 static always_inline uint64_t float_zero_divide_excp (uint64_t arg1, uint64_t arg2)
579 {
580 env->fpscr |= 1 << FPSCR_ZX;
581 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
582 /* Update the floating-point exception summary */
583 env->fpscr |= 1 << FPSCR_FX;
584 if (fpscr_ze != 0) {
585 /* Update the floating-point enabled exception summary */
586 env->fpscr |= 1 << FPSCR_FEX;
587 if (msr_fe0 != 0 || msr_fe1 != 0) {
588 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
589 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
590 }
591 } else {
592 /* Set the result to infinity */
593 arg1 = ((arg1 ^ arg2) & 0x8000000000000000ULL);
594 arg1 |= 0x7FFULL << 52;
595 }
596 return arg1;
597 }
598
599 static always_inline void float_overflow_excp (void)
600 {
601 env->fpscr |= 1 << FPSCR_OX;
602 /* Update the floating-point exception summary */
603 env->fpscr |= 1 << FPSCR_FX;
604 if (fpscr_oe != 0) {
605 /* XXX: should adjust the result */
606 /* Update the floating-point enabled exception summary */
607 env->fpscr |= 1 << FPSCR_FEX;
608 /* We must update the target FPR before raising the exception */
609 env->exception_index = POWERPC_EXCP_PROGRAM;
610 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
611 } else {
612 env->fpscr |= 1 << FPSCR_XX;
613 env->fpscr |= 1 << FPSCR_FI;
614 }
615 }
616
617 static always_inline void float_underflow_excp (void)
618 {
619 env->fpscr |= 1 << FPSCR_UX;
620 /* Update the floating-point exception summary */
621 env->fpscr |= 1 << FPSCR_FX;
622 if (fpscr_ue != 0) {
623 /* XXX: should adjust the result */
624 /* Update the floating-point enabled exception summary */
625 env->fpscr |= 1 << FPSCR_FEX;
626 /* We must update the target FPR before raising the exception */
627 env->exception_index = POWERPC_EXCP_PROGRAM;
628 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
629 }
630 }
631
632 static always_inline void float_inexact_excp (void)
633 {
634 env->fpscr |= 1 << FPSCR_XX;
635 /* Update the floating-point exception summary */
636 env->fpscr |= 1 << FPSCR_FX;
637 if (fpscr_xe != 0) {
638 /* Update the floating-point enabled exception summary */
639 env->fpscr |= 1 << FPSCR_FEX;
640 /* We must update the target FPR before raising the exception */
641 env->exception_index = POWERPC_EXCP_PROGRAM;
642 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
643 }
644 }
645
646 static always_inline void fpscr_set_rounding_mode (void)
647 {
648 int rnd_type;
649
650 /* Set rounding mode */
651 switch (fpscr_rn) {
652 case 0:
653 /* Best approximation (round to nearest) */
654 rnd_type = float_round_nearest_even;
655 break;
656 case 1:
657 /* Smaller magnitude (round toward zero) */
658 rnd_type = float_round_to_zero;
659 break;
660 case 2:
661 /* Round toward +infinite */
662 rnd_type = float_round_up;
663 break;
664 default:
665 case 3:
666 /* Round toward -infinite */
667 rnd_type = float_round_down;
668 break;
669 }
670 set_float_rounding_mode(rnd_type, &env->fp_status);
671 }
672
673 void helper_fpscr_setbit (uint32_t bit)
674 {
675 int prev;
676
677 prev = (env->fpscr >> bit) & 1;
678 env->fpscr |= 1 << bit;
679 if (prev == 0) {
680 switch (bit) {
681 case FPSCR_VX:
682 env->fpscr |= 1 << FPSCR_FX;
683 if (fpscr_ve)
684 goto raise_ve;
685 case FPSCR_OX:
686 env->fpscr |= 1 << FPSCR_FX;
687 if (fpscr_oe)
688 goto raise_oe;
689 break;
690 case FPSCR_UX:
691 env->fpscr |= 1 << FPSCR_FX;
692 if (fpscr_ue)
693 goto raise_ue;
694 break;
695 case FPSCR_ZX:
696 env->fpscr |= 1 << FPSCR_FX;
697 if (fpscr_ze)
698 goto raise_ze;
699 break;
700 case FPSCR_XX:
701 env->fpscr |= 1 << FPSCR_FX;
702 if (fpscr_xe)
703 goto raise_xe;
704 break;
705 case FPSCR_VXSNAN:
706 case FPSCR_VXISI:
707 case FPSCR_VXIDI:
708 case FPSCR_VXZDZ:
709 case FPSCR_VXIMZ:
710 case FPSCR_VXVC:
711 case FPSCR_VXSOFT:
712 case FPSCR_VXSQRT:
713 case FPSCR_VXCVI:
714 env->fpscr |= 1 << FPSCR_VX;
715 env->fpscr |= 1 << FPSCR_FX;
716 if (fpscr_ve != 0)
717 goto raise_ve;
718 break;
719 case FPSCR_VE:
720 if (fpscr_vx != 0) {
721 raise_ve:
722 env->error_code = POWERPC_EXCP_FP;
723 if (fpscr_vxsnan)
724 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
725 if (fpscr_vxisi)
726 env->error_code |= POWERPC_EXCP_FP_VXISI;
727 if (fpscr_vxidi)
728 env->error_code |= POWERPC_EXCP_FP_VXIDI;
729 if (fpscr_vxzdz)
730 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
731 if (fpscr_vximz)
732 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
733 if (fpscr_vxvc)
734 env->error_code |= POWERPC_EXCP_FP_VXVC;
735 if (fpscr_vxsoft)
736 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
737 if (fpscr_vxsqrt)
738 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
739 if (fpscr_vxcvi)
740 env->error_code |= POWERPC_EXCP_FP_VXCVI;
741 goto raise_excp;
742 }
743 break;
744 case FPSCR_OE:
745 if (fpscr_ox != 0) {
746 raise_oe:
747 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
748 goto raise_excp;
749 }
750 break;
751 case FPSCR_UE:
752 if (fpscr_ux != 0) {
753 raise_ue:
754 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
755 goto raise_excp;
756 }
757 break;
758 case FPSCR_ZE:
759 if (fpscr_zx != 0) {
760 raise_ze:
761 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
762 goto raise_excp;
763 }
764 break;
765 case FPSCR_XE:
766 if (fpscr_xx != 0) {
767 raise_xe:
768 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
769 goto raise_excp;
770 }
771 break;
772 case FPSCR_RN1:
773 case FPSCR_RN:
774 fpscr_set_rounding_mode();
775 break;
776 default:
777 break;
778 raise_excp:
779 /* Update the floating-point enabled exception summary */
780 env->fpscr |= 1 << FPSCR_FEX;
781 /* We have to update Rc1 before raising the exception */
782 env->exception_index = POWERPC_EXCP_PROGRAM;
783 break;
784 }
785 }
786 }
787
788 void helper_store_fpscr (uint64_t arg, uint32_t mask)
789 {
790 /*
791 * We use only the 32 LSB of the incoming fpr
792 */
793 uint32_t prev, new;
794 int i;
795
796 prev = env->fpscr;
797 new = (uint32_t)arg;
798 new &= ~0x90000000;
799 new |= prev & 0x90000000;
800 for (i = 0; i < 7; i++) {
801 if (mask & (1 << i)) {
802 env->fpscr &= ~(0xF << (4 * i));
803 env->fpscr |= new & (0xF << (4 * i));
804 }
805 }
806 /* Update VX and FEX */
807 if (fpscr_ix != 0)
808 env->fpscr |= 1 << FPSCR_VX;
809 else
810 env->fpscr &= ~(1 << FPSCR_VX);
811 if ((fpscr_ex & fpscr_eex) != 0) {
812 env->fpscr |= 1 << FPSCR_FEX;
813 env->exception_index = POWERPC_EXCP_PROGRAM;
814 /* XXX: we should compute it properly */
815 env->error_code = POWERPC_EXCP_FP;
816 }
817 else
818 env->fpscr &= ~(1 << FPSCR_FEX);
819 fpscr_set_rounding_mode();
820 }
821
822 void helper_float_check_status (void)
823 {
824 #ifdef CONFIG_SOFTFLOAT
825 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
826 (env->error_code & POWERPC_EXCP_FP)) {
827 /* Differred floating-point exception after target FPR update */
828 if (msr_fe0 != 0 || msr_fe1 != 0)
829 raise_exception_err(env, env->exception_index, env->error_code);
830 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
831 float_overflow_excp();
832 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
833 float_underflow_excp();
834 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
835 float_inexact_excp();
836 }
837 #else
838 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
839 (env->error_code & POWERPC_EXCP_FP)) {
840 /* Differred floating-point exception after target FPR update */
841 if (msr_fe0 != 0 || msr_fe1 != 0)
842 raise_exception_err(env, env->exception_index, env->error_code);
843 }
844 RETURN();
845 #endif
846 }
847
848 #ifdef CONFIG_SOFTFLOAT
849 void helper_reset_fpstatus (void)
850 {
851 env->fp_status.float_exception_flags = 0;
852 }
853 #endif
854
855 /* fadd - fadd. */
856 uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
857 {
858 CPU_DoubleU farg1, farg2;
859
860 farg1.ll = arg1;
861 farg2.ll = arg2;
862 #if USE_PRECISE_EMULATION
863 if (unlikely(float64_is_signaling_nan(farg1.d) ||
864 float64_is_signaling_nan(farg2.d))) {
865 /* sNaN addition */
866 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
867 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
868 fpisneg(farg1.d) == fpisneg(farg2.d))) {
869 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
870 } else {
871 /* Magnitude subtraction of infinities */
872 farg1.ll == fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
873 }
874 #else
875 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
876 #endif
877 return farg1.ll;
878 }
879
880 /* fsub - fsub. */
881 uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
882 {
883 CPU_DoubleU farg1, farg2;
884
885 farg1.ll = arg1;
886 farg2.ll = arg2;
887 #if USE_PRECISE_EMULATION
888 {
889 if (unlikely(float64_is_signaling_nan(farg1.d) ||
890 float64_is_signaling_nan(farg2.d))) {
891 /* sNaN subtraction */
892 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
893 } else if (likely(isfinite(farg1.d) || isfinite(farg2.d) ||
894 fpisneg(farg1.d) != fpisneg(farg2.d))) {
895 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
896 } else {
897 /* Magnitude subtraction of infinities */
898 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
899 }
900 }
901 #else
902 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
903 #endif
904 return farg1.ll;
905 }
906
907 /* fmul - fmul. */
908 uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
909 {
910 CPU_DoubleU farg1, farg2;
911
912 farg1.ll = arg1;
913 farg2.ll = arg2;
914 #if USE_PRECISE_EMULATION
915 if (unlikely(float64_is_signaling_nan(farg1.d) ||
916 float64_is_signaling_nan(farg2.d))) {
917 /* sNaN multiplication */
918 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
919 } else if (unlikely((isinfinity(farg1.d) && iszero(farg2.d)) ||
920 (iszero(farg1.d) && isinfinity(farg2.d)))) {
921 /* Multiplication of zero by infinity */
922 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
923 } else {
924 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
925 }
926 }
927 #else
928 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
929 #endif
930 return farg1.ll;
931 }
932
933 /* fdiv - fdiv. */
934 uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
935 {
936 CPU_DoubleU farg1, farg2;
937
938 farg1.ll = arg1;
939 farg2.ll = arg2;
940 #if USE_PRECISE_EMULATION
941 if (unlikely(float64_is_signaling_nan(farg1.d) ||
942 float64_is_signaling_nan(farg2.d))) {
943 /* sNaN division */
944 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
945 } else if (unlikely(isinfinity(farg1.d) && isinfinity(farg2.d))) {
946 /* Division of infinity by infinity */
947 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
948 } else if (unlikely(iszero(farg2.d))) {
949 if (iszero(farg1.d)) {
950 /* Division of zero by zero */
951 farg1.ll fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
952 } else {
953 /* Division by zero */
954 farg1.ll = float_zero_divide_excp(farg1.d, farg2.d);
955 }
956 } else {
957 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
958 }
959 #else
960 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
961 #endif
962 return farg1.ll;
963 }
964
965 /* fabs */
966 uint64_t helper_fabs (uint64_t arg)
967 {
968 CPU_DoubleU farg;
969
970 farg.ll = arg;
971 farg.d = float64_abs(farg.d);
972 return farg.ll;
973 }
974
975 /* fnabs */
976 uint64_t helper_fnabs (uint64_t arg)
977 {
978 CPU_DoubleU farg;
979
980 farg.ll = arg;
981 farg.d = float64_abs(farg.d);
982 farg.d = float64_chs(farg.d);
983 return farg.ll;
984 }
985
986 /* fneg */
987 uint64_t helper_fneg (uint64_t arg)
988 {
989 CPU_DoubleU farg;
990
991 farg.ll = arg;
992 farg.d = float64_chs(farg.d);
993 return farg.ll;
994 }
995
996 /* fctiw - fctiw. */
997 uint64_t helper_fctiw (uint64_t arg)
998 {
999 CPU_DoubleU farg;
1000 farg.ll = arg;
1001
1002 if (unlikely(float64_is_signaling_nan(farg.d))) {
1003 /* sNaN conversion */
1004 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1005 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1006 /* qNan / infinity conversion */
1007 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1008 } else {
1009 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1010 #if USE_PRECISE_EMULATION
1011 /* XXX: higher bits are not supposed to be significant.
1012 * to make tests easier, return the same as a real PowerPC 750
1013 */
1014 farg.ll |= 0xFFF80000ULL << 32;
1015 #endif
1016 }
1017 return farg.ll;
1018 }
1019
1020 /* fctiwz - fctiwz. */
1021 uint64_t helper_fctiwz (uint64_t arg)
1022 {
1023 CPU_DoubleU farg;
1024 farg.ll = arg;
1025
1026 if (unlikely(float64_is_signaling_nan(farg.d))) {
1027 /* sNaN conversion */
1028 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1029 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1030 /* qNan / infinity conversion */
1031 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1032 } else {
1033 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1034 #if USE_PRECISE_EMULATION
1035 /* XXX: higher bits are not supposed to be significant.
1036 * to make tests easier, return the same as a real PowerPC 750
1037 */
1038 farg.ll |= 0xFFF80000ULL << 32;
1039 #endif
1040 }
1041 return farg.ll;
1042 }
1043
1044 #if defined(TARGET_PPC64)
1045 /* fcfid - fcfid. */
1046 uint64_t helper_fcfid (uint64_t arg)
1047 {
1048 CPU_DoubleU farg;
1049 farg.d = int64_to_float64(arg, &env->fp_status);
1050 return farg.ll;
1051 }
1052
1053 /* fctid - fctid. */
1054 uint64_t helper_fctid (uint64_t arg)
1055 {
1056 CPU_DoubleU farg;
1057 farg.ll = arg;
1058
1059 if (unlikely(float64_is_signaling_nan(farg.d))) {
1060 /* sNaN conversion */
1061 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1062 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1063 /* qNan / infinity conversion */
1064 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1065 } else {
1066 farg.ll = float64_to_int64(farg.d, &env->fp_status);
1067 }
1068 return farg.ll;
1069 }
1070
1071 /* fctidz - fctidz. */
1072 uint64_t helper_fctidz (uint64_t arg)
1073 {
1074 CPU_DoubleU farg;
1075 farg.ll = arg;
1076
1077 if (unlikely(float64_is_signaling_nan(farg.d))) {
1078 /* sNaN conversion */
1079 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1080 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1081 /* qNan / infinity conversion */
1082 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1083 } else {
1084 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
1085 }
1086 return farg.ll;
1087 }
1088
1089 #endif
1090
1091 static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
1092 {
1093 CPU_DoubleU farg;
1094 farg.ll = arg;
1095
1096 if (unlikely(float64_is_signaling_nan(farg.d))) {
1097 /* sNaN round */
1098 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1099 } else if (unlikely(float64_is_nan(farg.d) || isinfinity(farg.d))) {
1100 /* qNan / infinity round */
1101 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1102 } else {
1103 set_float_rounding_mode(rounding_mode, &env->fp_status);
1104 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
1105 /* Restore rounding mode from FPSCR */
1106 fpscr_set_rounding_mode();
1107 }
1108 return farg.ll;
1109 }
1110
1111 uint64_t helper_frin (uint64_t arg)
1112 {
1113 return do_fri(arg, float_round_nearest_even);
1114 }
1115
1116 uint64_t helper_friz (uint64_t arg)
1117 {
1118 return do_fri(arg, float_round_to_zero);
1119 }
1120
1121 uint64_t helper_frip (uint64_t arg)
1122 {
1123 return do_fri(arg, float_round_up);
1124 }
1125
1126 uint64_t helper_frim (uint64_t arg)
1127 {
1128 return do_fri(arg, float_round_down);
1129 }
1130
1131 /* fmadd - fmadd. */
1132 uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1133 {
1134 CPU_DoubleU farg1, farg2, farg3;
1135
1136 farg1.ll = arg1;
1137 farg2.ll = arg2;
1138 farg3.ll = arg3;
1139 #if USE_PRECISE_EMULATION
1140 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1141 float64_is_signaling_nan(farg2.d) ||
1142 float64_is_signaling_nan(farg3.d))) {
1143 /* sNaN operation */
1144 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1145 } else {
1146 #ifdef FLOAT128
1147 /* This is the way the PowerPC specification defines it */
1148 float128 ft0_128, ft1_128;
1149
1150 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1151 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1152 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1153 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1154 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1155 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1156 #else
1157 /* This is OK on x86 hosts */
1158 farg1.d = (farg1.d * farg2.d) + farg3.d;
1159 #endif
1160 }
1161 #else
1162 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1163 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1164 #endif
1165 return farg1.ll;
1166 }
1167
1168 /* fmsub - fmsub. */
1169 uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1170 {
1171 CPU_DoubleU farg1, farg2, farg3;
1172
1173 farg1.ll = arg1;
1174 farg2.ll = arg2;
1175 farg3.ll = arg3;
1176 #if USE_PRECISE_EMULATION
1177 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1178 float64_is_signaling_nan(farg2.d) ||
1179 float64_is_signaling_nan(farg3.d))) {
1180 /* sNaN operation */
1181 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1182 } else {
1183 #ifdef FLOAT128
1184 /* This is the way the PowerPC specification defines it */
1185 float128 ft0_128, ft1_128;
1186
1187 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1188 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1189 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1190 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1191 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1192 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1193 #else
1194 /* This is OK on x86 hosts */
1195 farg1.d = (farg1.d * farg2.d) - farg3.d;
1196 #endif
1197 }
1198 #else
1199 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1200 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1201 #endif
1202 return farg1.ll;
1203 }
1204
1205 /* fnmadd - fnmadd. */
1206 uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1207 {
1208 CPU_DoubleU farg1, farg2, farg3;
1209
1210 farg1.ll = arg1;
1211 farg2.ll = arg2;
1212 farg3.ll = arg3;
1213
1214 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1215 float64_is_signaling_nan(farg2.d) ||
1216 float64_is_signaling_nan(farg3.d))) {
1217 /* sNaN operation */
1218 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1219 } else {
1220 #if USE_PRECISE_EMULATION
1221 #ifdef FLOAT128
1222 /* This is the way the PowerPC specification defines it */
1223 float128 ft0_128, ft1_128;
1224
1225 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1226 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1227 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1228 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1229 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1230 farg1.d= float128_to_float64(ft0_128, &env->fp_status);
1231 #else
1232 /* This is OK on x86 hosts */
1233 farg1.d = (farg1.d * farg2.d) + farg3.d;
1234 #endif
1235 #else
1236 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1237 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1238 #endif
1239 if (likely(!isnan(farg1.d)))
1240 farg1.d = float64_chs(farg1.d);
1241 }
1242 return farg1.ll;
1243 }
1244
1245 /* fnmsub - fnmsub. */
1246 uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1247 {
1248 CPU_DoubleU farg1, farg2, farg3;
1249
1250 farg1.ll = arg1;
1251 farg2.ll = arg2;
1252 farg3.ll = arg3;
1253
1254 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1255 float64_is_signaling_nan(farg2.d) ||
1256 float64_is_signaling_nan(farg3.d))) {
1257 /* sNaN operation */
1258 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1259 } else {
1260 #if USE_PRECISE_EMULATION
1261 #ifdef FLOAT128
1262 /* This is the way the PowerPC specification defines it */
1263 float128 ft0_128, ft1_128;
1264
1265 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1266 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
1267 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1268 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1269 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1270 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1271 #else
1272 /* This is OK on x86 hosts */
1273 farg1.d = (farg1.d * farg2.d) - farg3.d;
1274 #endif
1275 #else
1276 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1277 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1278 #endif
1279 if (likely(!isnan(farg1.d)))
1280 farg1.d = float64_chs(farg1.d);
1281 }
1282 return farg1.ll;
1283 }
1284
1285 /* frsp - frsp. */
1286 uint64_t helper_frsp (uint64_t arg)
1287 {
1288 CPU_DoubleU farg;
1289 farg.ll = arg;
1290
1291 #if USE_PRECISE_EMULATION
1292 if (unlikely(float64_is_signaling_nan(farg.d))) {
1293 /* sNaN square root */
1294 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1295 } else {
1296 fard.d = float64_to_float32(farg.d, &env->fp_status);
1297 }
1298 #else
1299 farg.d = float64_to_float32(farg.d, &env->fp_status);
1300 #endif
1301 return farg.ll;
1302 }
1303
1304 /* fsqrt - fsqrt. */
1305 uint64_t helper_fsqrt (uint64_t arg)
1306 {
1307 CPU_DoubleU farg;
1308 farg.ll = arg;
1309
1310 if (unlikely(float64_is_signaling_nan(farg.d))) {
1311 /* sNaN square root */
1312 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1313 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1314 /* Square root of a negative nonzero number */
1315 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1316 } else {
1317 farg.d = float64_sqrt(farg.d, &env->fp_status);
1318 }
1319 return farg.ll;
1320 }
1321
1322 /* fre - fre. */
1323 uint64_t helper_fre (uint64_t arg)
1324 {
1325 CPU_DoubleU farg;
1326 farg.ll = arg;
1327
1328 if (unlikely(float64_is_signaling_nan(farg.d))) {
1329 /* sNaN reciprocal */
1330 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1331 } else if (unlikely(iszero(farg.d))) {
1332 /* Zero reciprocal */
1333 farg.ll = float_zero_divide_excp(1.0, farg.d);
1334 } else if (likely(isnormal(farg.d))) {
1335 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1336 } else {
1337 if (farg.ll == 0x8000000000000000ULL) {
1338 farg.ll = 0xFFF0000000000000ULL;
1339 } else if (farg.ll == 0x0000000000000000ULL) {
1340 farg.ll = 0x7FF0000000000000ULL;
1341 } else if (isnan(farg.d)) {
1342 farg.ll = 0x7FF8000000000000ULL;
1343 } else if (fpisneg(farg.d)) {
1344 farg.ll = 0x8000000000000000ULL;
1345 } else {
1346 farg.ll = 0x0000000000000000ULL;
1347 }
1348 }
1349 return farg.d;
1350 }
1351
1352 /* fres - fres. */
1353 uint64_t helper_fres (uint64_t arg)
1354 {
1355 CPU_DoubleU farg;
1356 farg.ll = arg;
1357
1358 if (unlikely(float64_is_signaling_nan(farg.d))) {
1359 /* sNaN reciprocal */
1360 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1361 } else if (unlikely(iszero(farg.d))) {
1362 /* Zero reciprocal */
1363 farg.ll = float_zero_divide_excp(1.0, farg.d);
1364 } else if (likely(isnormal(farg.d))) {
1365 #if USE_PRECISE_EMULATION
1366 farg.d = float64_div(1.0, farg.d, &env->fp_status);
1367 farg.d = float64_to_float32(farg.d, &env->fp_status);
1368 #else
1369 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1370 #endif
1371 } else {
1372 if (farg.ll == 0x8000000000000000ULL) {
1373 farg.ll = 0xFFF0000000000000ULL;
1374 } else if (farg.ll == 0x0000000000000000ULL) {
1375 farg.ll = 0x7FF0000000000000ULL;
1376 } else if (isnan(farg.d)) {
1377 farg.ll = 0x7FF8000000000000ULL;
1378 } else if (fpisneg(farg.d)) {
1379 farg.ll = 0x8000000000000000ULL;
1380 } else {
1381 farg.ll = 0x0000000000000000ULL;
1382 }
1383 }
1384 return farg.ll;
1385 }
1386
1387 /* frsqrte - frsqrte. */
1388 uint64_t helper_frsqrte (uint64_t arg)
1389 {
1390 CPU_DoubleU farg;
1391 farg.ll = arg;
1392
1393 if (unlikely(float64_is_signaling_nan(farg.d))) {
1394 /* sNaN reciprocal square root */
1395 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1396 } else if (unlikely(fpisneg(farg.d) && !iszero(farg.d))) {
1397 /* Reciprocal square root of a negative nonzero number */
1398 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1399 } else if (likely(isnormal(farg.d))) {
1400 farg.d = float64_sqrt(farg.d, &env->fp_status);
1401 farg.d = float32_div(1.0, farg.d, &env->fp_status);
1402 } else {
1403 if (farg.ll == 0x8000000000000000ULL) {
1404 farg.ll = 0xFFF0000000000000ULL;
1405 } else if (farg.ll == 0x0000000000000000ULL) {
1406 farg.ll = 0x7FF0000000000000ULL;
1407 } else if (isnan(farg.d)) {
1408 farg.ll |= 0x000FFFFFFFFFFFFFULL;
1409 } else if (fpisneg(farg.d)) {
1410 farg.ll = 0x7FF8000000000000ULL;
1411 } else {
1412 farg.ll = 0x0000000000000000ULL;
1413 }
1414 }
1415 return farg.ll;
1416 }
1417
1418 /* fsel - fsel. */
1419 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1420 {
1421 CPU_DoubleU farg1, farg2, farg3;
1422
1423 farg1.ll = arg1;
1424 farg2.ll = arg2;
1425 farg3.ll = arg3;
1426
1427 if (!fpisneg(farg1.d) || iszero(farg1.d))
1428 return farg2.ll;
1429 else
1430 return farg2.ll;
1431 }
1432
1433 uint32_t helper_fcmpu (uint64_t arg1, uint64_t arg2)
1434 {
1435 CPU_DoubleU farg1, farg2;
1436 uint32_t ret = 0;
1437 farg1.ll = arg1;
1438 farg2.ll = arg2;
1439
1440 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1441 float64_is_signaling_nan(farg2.d))) {
1442 /* sNaN comparison */
1443 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1444 } else {
1445 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1446 ret = 0x08UL;
1447 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1448 ret = 0x04UL;
1449 } else {
1450 ret = 0x02UL;
1451 }
1452 }
1453 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1454 env->fpscr |= ret << FPSCR_FPRF;
1455 return ret;
1456 }
1457
1458 uint32_t helper_fcmpo (uint64_t arg1, uint64_t arg2)
1459 {
1460 CPU_DoubleU farg1, farg2;
1461 uint32_t ret = 0;
1462 farg1.ll = arg1;
1463 farg2.ll = arg2;
1464
1465 if (unlikely(float64_is_nan(farg1.d) ||
1466 float64_is_nan(farg2.d))) {
1467 if (float64_is_signaling_nan(farg1.d) ||
1468 float64_is_signaling_nan(farg2.d)) {
1469 /* sNaN comparison */
1470 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1471 POWERPC_EXCP_FP_VXVC);
1472 } else {
1473 /* qNaN comparison */
1474 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1475 }
1476 } else {
1477 if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1478 ret = 0x08UL;
1479 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1480 ret = 0x04UL;
1481 } else {
1482 ret = 0x02UL;
1483 }
1484 }
1485 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1486 env->fpscr |= ret << FPSCR_FPRF;
1487 return ret;
1488 }
1489
1490 #if !defined (CONFIG_USER_ONLY)
1491 void helper_store_msr (target_ulong val)
1492 {
1493 val = hreg_store_msr(env, val, 0);
1494 if (val != 0) {
1495 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1496 raise_exception(env, val);
1497 }
1498 }
1499
1500 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1501
1502 static always_inline void do_rfi (target_ulong nip, target_ulong msr,
1503 target_ulong msrm, int keep_msrh)
1504 {
1505 #if defined(TARGET_PPC64)
1506 if (msr & (1ULL << MSR_SF)) {
1507 nip = (uint64_t)nip;
1508 msr &= (uint64_t)msrm;
1509 } else {
1510 nip = (uint32_t)nip;
1511 msr = (uint32_t)(msr & msrm);
1512 if (keep_msrh)
1513 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1514 }
1515 #else
1516 nip = (uint32_t)nip;
1517 msr &= (uint32_t)msrm;
1518 #endif
1519 /* XXX: beware: this is false if VLE is supported */
1520 env->nip = nip & ~((target_ulong)0x00000003);
1521 hreg_store_msr(env, msr, 1);
1522 #if defined (DEBUG_OP)
1523 cpu_dump_rfi(env->nip, env->msr);
1524 #endif
1525 /* No need to raise an exception here,
1526 * as rfi is always the last insn of a TB
1527 */
1528 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1529 }
1530
1531 void helper_rfi (void)
1532 {
1533 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1534 ~((target_ulong)0xFFFF0000), 1);
1535 }
1536
1537 #if defined(TARGET_PPC64)
1538 void helper_rfid (void)
1539 {
1540 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1541 ~((target_ulong)0xFFFF0000), 0);
1542 }
1543
1544 void helper_hrfid (void)
1545 {
1546 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1547 ~((target_ulong)0xFFFF0000), 0);
1548 }
1549 #endif
1550 #endif
1551
1552 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1553 {
1554 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1555 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1556 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1557 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1558 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1559 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1560 }
1561 }
1562
1563 #if defined(TARGET_PPC64)
1564 void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
1565 {
1566 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1567 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1568 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1569 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1570 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
1571 raise_exception_err(env, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1572 }
1573 #endif
1574
1575 /*****************************************************************************/
1576 /* PowerPC 601 specific instructions (POWER bridge) */
1577 void do_POWER_abso (void)
1578 {
1579 if ((int32_t)T0 == INT32_MIN) {
1580 T0 = INT32_MAX;
1581 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1582 } else if ((int32_t)T0 < 0) {
1583 T0 = -T0;
1584 env->xer &= ~(1 << XER_OV);
1585 } else {
1586 env->xer &= ~(1 << XER_OV);
1587 }
1588 }
1589
1590 target_ulong helper_clcs (uint32_t arg)
1591 {
1592 switch (arg) {
1593 case 0x0CUL:
1594 /* Instruction cache line size */
1595 return env->icache_line_size;
1596 break;
1597 case 0x0DUL:
1598 /* Data cache line size */
1599 return env->dcache_line_size;
1600 break;
1601 case 0x0EUL:
1602 /* Minimum cache line size */
1603 return (env->icache_line_size < env->dcache_line_size) ?
1604 env->icache_line_size : env->dcache_line_size;
1605 break;
1606 case 0x0FUL:
1607 /* Maximum cache line size */
1608 return (env->icache_line_size > env->dcache_line_size) ?
1609 env->icache_line_size : env->dcache_line_size;
1610 break;
1611 default:
1612 /* Undefined */
1613 return 0;
1614 break;
1615 }
1616 }
1617
1618 target_ulong helper_div (target_ulong arg1, target_ulong arg2)
1619 {
1620 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1621
1622 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1623 (int32_t)arg2 == 0) {
1624 env->spr[SPR_MQ] = 0;
1625 return INT32_MIN;
1626 } else {
1627 env->spr[SPR_MQ] = tmp % arg2;
1628 return tmp / (int32_t)arg2;
1629 }
1630 }
1631
1632 target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
1633 {
1634 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
1635
1636 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1637 (int32_t)arg2 == 0) {
1638 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1639 env->spr[SPR_MQ] = 0;
1640 return INT32_MIN;
1641 } else {
1642 env->spr[SPR_MQ] = tmp % arg2;
1643 tmp /= (int32_t)arg2;
1644 if ((int32_t)tmp != tmp) {
1645 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1646 } else {
1647 env->xer &= ~(1 << XER_OV);
1648 }
1649 return tmp;
1650 }
1651 }
1652
1653 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1654 {
1655 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1656 (int32_t)arg2 == 0) {
1657 env->spr[SPR_MQ] = 0;
1658 return INT32_MIN;
1659 } else {
1660 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1661 return (int32_t)arg1 / (int32_t)arg2;
1662 }
1663 }
1664
1665 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1666 {
1667 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1668 (int32_t)arg2 == 0) {
1669 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1670 env->spr[SPR_MQ] = 0;
1671 return INT32_MIN;
1672 } else {
1673 env->xer &= ~(1 << XER_OV);
1674 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1675 return (int32_t)arg1 / (int32_t)arg2;
1676 }
1677 }
1678
1679 #if !defined (CONFIG_USER_ONLY)
1680 target_ulong helper_rac (target_ulong addr)
1681 {
1682 mmu_ctx_t ctx;
1683 int nb_BATs;
1684 target_ulong ret = 0;
1685
1686 /* We don't have to generate many instances of this instruction,
1687 * as rac is supervisor only.
1688 */
1689 /* XXX: FIX THIS: Pretend we have no BAT */
1690 nb_BATs = env->nb_BATs;
1691 env->nb_BATs = 0;
1692 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1693 ret = ctx.raddr;
1694 env->nb_BATs = nb_BATs;
1695 return ret;
1696 }
1697
1698 void helper_rfsvc (void)
1699 {
1700 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1701 }
1702
1703 void do_store_hid0_601 (void)
1704 {
1705 uint32_t hid0;
1706
1707 hid0 = env->spr[SPR_HID0];
1708 if ((T0 ^ hid0) & 0x00000008) {
1709 /* Change current endianness */
1710 env->hflags &= ~(1 << MSR_LE);
1711 env->hflags_nmsr &= ~(1 << MSR_LE);
1712 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1713 env->hflags |= env->hflags_nmsr;
1714 if (loglevel != 0) {
1715 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1716 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1717 }
1718 }
1719 env->spr[SPR_HID0] = T0;
1720 }
1721 #endif
1722
1723 /*****************************************************************************/
1724 /* 602 specific instructions */
1725 /* mfrom is the most crazy instruction ever seen, imho ! */
1726 /* Real implementation uses a ROM table. Do the same */
1727 #define USE_MFROM_ROM_TABLE
1728 target_ulong helper_602_mfrom (target_ulong arg)
1729 {
1730 if (likely(arg < 602)) {
1731 #if defined(USE_MFROM_ROM_TABLE)
1732 #include "mfrom_table.c"
1733 return mfrom_ROM_table[T0];
1734 #else
1735 double d;
1736 /* Extremly decomposed:
1737 * -arg / 256
1738 * return 256 * log10(10 + 1.0) + 0.5
1739 */
1740 d = arg;
1741 d = float64_div(d, 256, &env->fp_status);
1742 d = float64_chs(d);
1743 d = exp10(d); // XXX: use float emulation function
1744 d = float64_add(d, 1.0, &env->fp_status);
1745 d = log10(d); // XXX: use float emulation function
1746 d = float64_mul(d, 256, &env->fp_status);
1747 d = float64_add(d, 0.5, &env->fp_status);
1748 return float64_round_to_int(d, &env->fp_status);
1749 #endif
1750 } else {
1751 return 0;
1752 }
1753 }
1754
1755 /*****************************************************************************/
1756 /* Embedded PowerPC specific helpers */
1757
1758 /* XXX: to be improved to check access rights when in user-mode */
1759 target_ulong helper_load_dcr (target_ulong dcrn)
1760 {
1761 target_ulong val = 0;
1762
1763 if (unlikely(env->dcr_env == NULL)) {
1764 if (loglevel != 0) {
1765 fprintf(logfile, "No DCR environment\n");
1766 }
1767 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1768 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1769 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
1770 if (loglevel != 0) {
1771 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1772 }
1773 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1774 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1775 }
1776 return val;
1777 }
1778
1779 void helper_store_dcr (target_ulong dcrn, target_ulong val)
1780 {
1781 if (unlikely(env->dcr_env == NULL)) {
1782 if (loglevel != 0) {
1783 fprintf(logfile, "No DCR environment\n");
1784 }
1785 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1786 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1787 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
1788 if (loglevel != 0) {
1789 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1790 }
1791 raise_exception_err(env, POWERPC_EXCP_PROGRAM,
1792 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1793 }
1794 }
1795
1796 #if !defined(CONFIG_USER_ONLY)
1797 void helper_40x_rfci (void)
1798 {
1799 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1800 ~((target_ulong)0xFFFF0000), 0);
1801 }
1802
1803 void helper_rfci (void)
1804 {
1805 do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1806 ~((target_ulong)0x3FFF0000), 0);
1807 }
1808
1809 void helper_rfdi (void)
1810 {
1811 do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1812 ~((target_ulong)0x3FFF0000), 0);
1813 }
1814
1815 void helper_rfmci (void)
1816 {
1817 do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1818 ~((target_ulong)0x3FFF0000), 0);
1819 }
1820
1821 void do_load_403_pb (int num)
1822 {
1823 T0 = env->pb[num];
1824 }
1825
1826 void do_store_403_pb (int num)
1827 {
1828 if (likely(env->pb[num] != T0)) {
1829 env->pb[num] = T0;
1830 /* Should be optimized */
1831 tlb_flush(env, 1);
1832 }
1833 }
1834 #endif
1835
1836 /* 440 specific */
1837 target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
1838 {
1839 target_ulong mask;
1840 int i;
1841
1842 i = 1;
1843 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1844 if ((high & mask) == 0) {
1845 if (update_Rc) {
1846 env->crf[0] = 0x4;
1847 }
1848 goto done;
1849 }
1850 i++;
1851 }
1852 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1853 if ((low & mask) == 0) {
1854 if (update_Rc) {
1855 env->crf[0] = 0x8;
1856 }
1857 goto done;
1858 }
1859 i++;
1860 }
1861 if (update_Rc) {
1862 env->crf[0] = 0x2;
1863 }
1864 done:
1865 env->xer = (env->xer & ~0x7F) | i;
1866 if (update_Rc) {
1867 env->crf[0] |= xer_so;
1868 }
1869 return i;
1870 }
1871
1872 /*****************************************************************************/
1873 /* SPE extension helpers */
1874 /* Use a table to make this quicker */
1875 static uint8_t hbrev[16] = {
1876 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1877 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1878 };
1879
1880 static always_inline uint8_t byte_reverse (uint8_t val)
1881 {
1882 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1883 }
1884
1885 static always_inline uint32_t word_reverse (uint32_t val)
1886 {
1887 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1888 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1889 }
1890
1891 #define MASKBITS 16 // Random value - to be fixed (implementation dependant)
1892 target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
1893 {
1894 uint32_t a, b, d, mask;
1895
1896 mask = UINT32_MAX >> (32 - MASKBITS);
1897 a = arg1 & mask;
1898 b = arg2 & mask;
1899 d = word_reverse(1 + word_reverse(a | ~b));
1900 return (arg1 & ~mask) | (d & b);
1901 }
1902
1903 uint32_t helper_cntlsw32 (uint32_t val)
1904 {
1905 if (val & 0x80000000)
1906 return clz32(~val);
1907 else
1908 return clz32(val);
1909 }
1910
1911 uint32_t helper_cntlzw32 (uint32_t val)
1912 {
1913 return clz32(val);
1914 }
1915
1916 /* Single-precision floating-point conversions */
1917 static always_inline uint32_t efscfsi (uint32_t val)
1918 {
1919 CPU_FloatU u;
1920
1921 u.f = int32_to_float32(val, &env->spe_status);
1922
1923 return u.l;
1924 }
1925
1926 static always_inline uint32_t efscfui (uint32_t val)
1927 {
1928 CPU_FloatU u;
1929
1930 u.f = uint32_to_float32(val, &env->spe_status);
1931
1932 return u.l;
1933 }
1934
1935 static always_inline int32_t efsctsi (uint32_t val)
1936 {
1937 CPU_FloatU u;
1938
1939 u.l = val;
1940 /* NaN are not treated the same way IEEE 754 does */
1941 if (unlikely(isnan(u.f)))
1942 return 0;
1943
1944 return float32_to_int32(u.f, &env->spe_status);
1945 }
1946
1947 static always_inline uint32_t efsctui (uint32_t val)
1948 {
1949 CPU_FloatU u;
1950
1951 u.l = val;
1952 /* NaN are not treated the same way IEEE 754 does */
1953 if (unlikely(isnan(u.f)))
1954 return 0;
1955
1956 return float32_to_uint32(u.f, &env->spe_status);
1957 }
1958
1959 static always_inline uint32_t efsctsiz (uint32_t val)
1960 {
1961 CPU_FloatU u;
1962
1963 u.l = val;
1964 /* NaN are not treated the same way IEEE 754 does */
1965 if (unlikely(isnan(u.f)))
1966 return 0;
1967
1968 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
1969 }
1970
1971 static always_inline uint32_t efsctuiz (uint32_t val)
1972 {
1973 CPU_FloatU u;
1974
1975 u.l = val;
1976 /* NaN are not treated the same way IEEE 754 does */
1977 if (unlikely(isnan(u.f)))
1978 return 0;
1979
1980 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
1981 }
1982
1983 static always_inline uint32_t efscfsf (uint32_t val)
1984 {
1985 CPU_FloatU u;
1986 float32 tmp;
1987
1988 u.f = int32_to_float32(val, &env->spe_status);
1989 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
1990 u.f = float32_div(u.f, tmp, &env->spe_status);
1991
1992 return u.l;
1993 }
1994
1995 static always_inline uint32_t efscfuf (uint32_t val)
1996 {
1997 CPU_FloatU u;
1998 float32 tmp;
1999
2000 u.f = uint32_to_float32(val, &env->spe_status);
2001 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2002 u.f = float32_div(u.f, tmp, &env->spe_status);
2003
2004 return u.l;
2005 }
2006
2007 static always_inline uint32_t efsctsf (uint32_t val)
2008 {
2009 CPU_FloatU u;
2010 float32 tmp;
2011
2012 u.l = val;
2013 /* NaN are not treated the same way IEEE 754 does */
2014 if (unlikely(isnan(u.f)))
2015 return 0;
2016 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2017 u.f = float32_mul(u.f, tmp, &env->spe_status);
2018
2019 return float32_to_int32(u.f, &env->spe_status);
2020 }
2021
2022 static always_inline uint32_t efsctuf (uint32_t val)
2023 {
2024 CPU_FloatU u;
2025 float32 tmp;
2026
2027 u.l = val;
2028 /* NaN are not treated the same way IEEE 754 does */
2029 if (unlikely(isnan(u.f)))
2030 return 0;
2031 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2032 u.f = float32_mul(u.f, tmp, &env->spe_status);
2033
2034 return float32_to_uint32(u.f, &env->spe_status);
2035 }
2036
2037 #define HELPER_SPE_SINGLE_CONV(name) \
2038 uint32_t helper_e##name (uint32_t val) \
2039 { \
2040 return e##name(val); \
2041 }
2042 /* efscfsi */
2043 HELPER_SPE_SINGLE_CONV(fscfsi);
2044 /* efscfui */
2045 HELPER_SPE_SINGLE_CONV(fscfui);
2046 /* efscfuf */
2047 HELPER_SPE_SINGLE_CONV(fscfuf);
2048 /* efscfsf */
2049 HELPER_SPE_SINGLE_CONV(fscfsf);
2050 /* efsctsi */
2051 HELPER_SPE_SINGLE_CONV(fsctsi);
2052 /* efsctui */
2053 HELPER_SPE_SINGLE_CONV(fsctui);
2054 /* efsctsiz */
2055 HELPER_SPE_SINGLE_CONV(fsctsiz);
2056 /* efsctuiz */
2057 HELPER_SPE_SINGLE_CONV(fsctuiz);
2058 /* efsctsf */
2059 HELPER_SPE_SINGLE_CONV(fsctsf);
2060 /* efsctuf */
2061 HELPER_SPE_SINGLE_CONV(fsctuf);
2062
2063 #define HELPER_SPE_VECTOR_CONV(name) \
2064 uint64_t helper_ev##name (uint64_t val) \
2065 { \
2066 return ((uint64_t)e##name(val >> 32) << 32) | \
2067 (uint64_t)e##name(val); \
2068 }
2069 /* evfscfsi */
2070 HELPER_SPE_VECTOR_CONV(fscfsi);
2071 /* evfscfui */
2072 HELPER_SPE_VECTOR_CONV(fscfui);
2073 /* evfscfuf */
2074 HELPER_SPE_VECTOR_CONV(fscfuf);
2075 /* evfscfsf */
2076 HELPER_SPE_VECTOR_CONV(fscfsf);
2077 /* evfsctsi */
2078 HELPER_SPE_VECTOR_CONV(fsctsi);
2079 /* evfsctui */
2080 HELPER_SPE_VECTOR_CONV(fsctui);
2081 /* evfsctsiz */
2082 HELPER_SPE_VECTOR_CONV(fsctsiz);
2083 /* evfsctuiz */
2084 HELPER_SPE_VECTOR_CONV(fsctuiz);
2085 /* evfsctsf */
2086 HELPER_SPE_VECTOR_CONV(fsctsf);
2087 /* evfsctuf */
2088 HELPER_SPE_VECTOR_CONV(fsctuf);
2089
2090 /* Single-precision floating-point arithmetic */
2091 static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
2092 {
2093 CPU_FloatU u1, u2;
2094 u1.l = op1;
2095 u2.l = op2;
2096 u1.f = float32_add(u1.f, u2.f, &env->spe_status);
2097 return u1.l;
2098 }
2099
2100 static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
2101 {
2102 CPU_FloatU u1, u2;
2103 u1.l = op1;
2104 u2.l = op2;
2105 u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
2106 return u1.l;
2107 }
2108
2109 static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
2110 {
2111 CPU_FloatU u1, u2;
2112 u1.l = op1;
2113 u2.l = op2;
2114 u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
2115 return u1.l;
2116 }
2117
2118 static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
2119 {
2120 CPU_FloatU u1, u2;
2121 u1.l = op1;
2122 u2.l = op2;
2123 u1.f = float32_div(u1.f, u2.f, &env->spe_status);
2124 return u1.l;
2125 }
2126
2127 #define HELPER_SPE_SINGLE_ARITH(name) \
2128 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2129 { \
2130 return e##name(op1, op2); \
2131 }
2132 /* efsadd */
2133 HELPER_SPE_SINGLE_ARITH(fsadd);
2134 /* efssub */
2135 HELPER_SPE_SINGLE_ARITH(fssub);
2136 /* efsmul */
2137 HELPER_SPE_SINGLE_ARITH(fsmul);
2138 /* efsdiv */
2139 HELPER_SPE_SINGLE_ARITH(fsdiv);
2140
2141 #define HELPER_SPE_VECTOR_ARITH(name) \
2142 uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
2143 { \
2144 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
2145 (uint64_t)e##name(op1, op2); \
2146 }
2147 /* evfsadd */
2148 HELPER_SPE_VECTOR_ARITH(fsadd);
2149 /* evfssub */
2150 HELPER_SPE_VECTOR_ARITH(fssub);
2151 /* evfsmul */
2152 HELPER_SPE_VECTOR_ARITH(fsmul);
2153 /* evfsdiv */
2154 HELPER_SPE_VECTOR_ARITH(fsdiv);
2155
2156 /* Single-precision floating-point comparisons */
2157 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2158 {
2159 CPU_FloatU u1, u2;
2160 u1.l = op1;
2161 u2.l = op2;
2162 return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2163 }
2164
2165 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2166 {
2167 CPU_FloatU u1, u2;
2168 u1.l = op1;
2169 u2.l = op2;
2170 return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2171 }
2172
2173 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2174 {
2175 CPU_FloatU u1, u2;
2176 u1.l = op1;
2177 u2.l = op2;
2178 return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2179 }
2180
2181 static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
2182 {
2183 /* XXX: TODO: test special values (NaN, infinites, ...) */
2184 return efststlt(op1, op2);
2185 }
2186
2187 static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
2188 {
2189 /* XXX: TODO: test special values (NaN, infinites, ...) */
2190 return efststgt(op1, op2);
2191 }
2192
2193 static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
2194 {
2195 /* XXX: TODO: test special values (NaN, infinites, ...) */
2196 return efststeq(op1, op2);
2197 }
2198
2199 #define HELPER_SINGLE_SPE_CMP(name) \
2200 uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
2201 { \
2202 return e##name(op1, op2) << 2; \
2203 }
2204 /* efststlt */
2205 HELPER_SINGLE_SPE_CMP(fststlt);
2206 /* efststgt */
2207 HELPER_SINGLE_SPE_CMP(fststgt);
2208 /* efststeq */
2209 HELPER_SINGLE_SPE_CMP(fststeq);
2210 /* efscmplt */
2211 HELPER_SINGLE_SPE_CMP(fscmplt);
2212 /* efscmpgt */
2213 HELPER_SINGLE_SPE_CMP(fscmpgt);
2214 /* efscmpeq */
2215 HELPER_SINGLE_SPE_CMP(fscmpeq);
2216
2217 static always_inline uint32_t evcmp_merge (int t0, int t1)
2218 {
2219 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
2220 }
2221
2222 #define HELPER_VECTOR_SPE_CMP(name) \
2223 uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
2224 { \
2225 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
2226 }
2227 /* evfststlt */
2228 HELPER_VECTOR_SPE_CMP(fststlt);
2229 /* evfststgt */
2230 HELPER_VECTOR_SPE_CMP(fststgt);
2231 /* evfststeq */
2232 HELPER_VECTOR_SPE_CMP(fststeq);
2233 /* evfscmplt */
2234 HELPER_VECTOR_SPE_CMP(fscmplt);
2235 /* evfscmpgt */
2236 HELPER_VECTOR_SPE_CMP(fscmpgt);
2237 /* evfscmpeq */
2238 HELPER_VECTOR_SPE_CMP(fscmpeq);
2239
2240 /* Double-precision floating-point conversion */
2241 uint64_t helper_efdcfsi (uint32_t val)
2242 {
2243 CPU_DoubleU u;
2244
2245 u.d = int32_to_float64(val, &env->spe_status);
2246
2247 return u.ll;
2248 }
2249
2250 uint64_t helper_efdcfsid (uint64_t val)
2251 {
2252 CPU_DoubleU u;
2253
2254 u.d = int64_to_float64(val, &env->spe_status);
2255
2256 return u.ll;
2257 }
2258
2259 uint64_t helper_efdcfui (uint32_t val)
2260 {
2261 CPU_DoubleU u;
2262
2263 u.d = uint32_to_float64(val, &env->spe_status);
2264
2265 return u.ll;
2266 }
2267
2268 uint64_t helper_efdcfuid (uint64_t val)
2269 {
2270 CPU_DoubleU u;
2271
2272 u.d = uint64_to_float64(val, &env->spe_status);
2273
2274 return u.ll;
2275 }
2276
2277 uint32_t helper_efdctsi (uint64_t val)
2278 {
2279 CPU_DoubleU u;
2280
2281 u.ll = val;
2282 /* NaN are not treated the same way IEEE 754 does */
2283 if (unlikely(isnan(u.d)))
2284 return 0;
2285
2286 return float64_to_int32(u.d, &env->spe_status);
2287 }
2288
2289 uint32_t helper_efdctui (uint64_t val)
2290 {
2291 CPU_DoubleU u;
2292
2293 u.ll = val;
2294 /* NaN are not treated the same way IEEE 754 does */
2295 if (unlikely(isnan(u.d)))
2296 return 0;
2297
2298 return float64_to_uint32(u.d, &env->spe_status);
2299 }
2300
2301 uint32_t helper_efdctsiz (uint64_t val)
2302 {
2303 CPU_DoubleU u;
2304
2305 u.ll = val;
2306 /* NaN are not treated the same way IEEE 754 does */
2307 if (unlikely(isnan(u.d)))
2308 return 0;
2309
2310 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2311 }
2312
2313 uint64_t helper_efdctsidz (uint64_t val)
2314 {
2315 CPU_DoubleU u;
2316
2317 u.ll = val;
2318 /* NaN are not treated the same way IEEE 754 does */
2319 if (unlikely(isnan(u.d)))
2320 return 0;
2321
2322 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2323 }
2324
2325 uint32_t helper_efdctuiz (uint64_t val)
2326 {
2327 CPU_DoubleU u;
2328
2329 u.ll = val;
2330 /* NaN are not treated the same way IEEE 754 does */
2331 if (unlikely(isnan(u.d)))
2332 return 0;
2333
2334 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2335 }
2336
2337 uint64_t helper_efdctuidz (uint64_t val)
2338 {
2339 CPU_DoubleU u;
2340
2341 u.ll = val;
2342 /* NaN are not treated the same way IEEE 754 does */
2343 if (unlikely(isnan(u.d)))
2344 return 0;
2345
2346 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2347 }
2348
2349 uint64_t helper_efdcfsf (uint32_t val)
2350 {
2351 CPU_DoubleU u;
2352 float64 tmp;
2353
2354 u.d = int32_to_float64(val, &env->spe_status);
2355 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2356 u.d = float64_div(u.d, tmp, &env->spe_status);
2357
2358 return u.ll;
2359 }
2360
2361 uint64_t helper_efdcfuf (uint32_t val)
2362 {
2363 CPU_DoubleU u;
2364 float64 tmp;
2365
2366 u.d = uint32_to_float64(val, &env->spe_status);
2367 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2368 u.d = float64_div(u.d, tmp, &env->spe_status);
2369
2370 return u.ll;
2371 }
2372
2373 uint32_t helper_efdctsf (uint64_t val)
2374 {
2375 CPU_DoubleU u;
2376 float64 tmp;
2377
2378 u.ll = val;
2379 /* NaN are not treated the same way IEEE 754 does */
2380 if (unlikely(isnan(u.d)))
2381 return 0;
2382 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2383 u.d = float64_mul(u.d, tmp, &env->spe_status);
2384
2385 return float64_to_int32(u.d, &env->spe_status);
2386 }
2387
2388 uint32_t helper_efdctuf (uint64_t val)
2389 {
2390 CPU_DoubleU u;
2391 float64 tmp;
2392
2393 u.ll = val;
2394 /* NaN are not treated the same way IEEE 754 does */
2395 if (unlikely(isnan(u.d)))
2396 return 0;
2397 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2398 u.d = float64_mul(u.d, tmp, &env->spe_status);
2399
2400 return float64_to_uint32(u.d, &env->spe_status);
2401 }
2402
2403 uint32_t helper_efscfd (uint64_t val)
2404 {
2405 CPU_DoubleU u1;
2406 CPU_FloatU u2;
2407
2408 u1.ll = val;
2409 u2.f = float64_to_float32(u1.d, &env->spe_status);
2410
2411 return u2.l;
2412 }
2413
2414 uint64_t helper_efdcfs (uint32_t val)
2415 {
2416 CPU_DoubleU u2;
2417 CPU_FloatU u1;
2418
2419 u1.l = val;
2420 u2.d = float32_to_float64(u1.f, &env->spe_status);
2421
2422 return u2.ll;
2423 }
2424
2425 /* Double precision fixed-point arithmetic */
2426 uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
2427 {
2428 CPU_DoubleU u1, u2;
2429 u1.ll = op1;
2430 u2.ll = op2;
2431 u1.d = float64_add(u1.d, u2.d, &env->spe_status);
2432 return u1.ll;
2433 }
2434
2435 uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
2436 {
2437 CPU_DoubleU u1, u2;
2438 u1.ll = op1;
2439 u2.ll = op2;
2440 u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
2441 return u1.ll;
2442 }
2443
2444 uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
2445 {
2446 CPU_DoubleU u1, u2;
2447 u1.ll = op1;
2448 u2.ll = op2;
2449 u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
2450 return u1.ll;
2451 }
2452
2453 uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
2454 {
2455 CPU_DoubleU u1, u2;
2456 u1.ll = op1;
2457 u2.ll = op2;
2458 u1.d = float64_div(u1.d, u2.d, &env->spe_status);
2459 return u1.ll;
2460 }
2461
2462 /* Double precision floating point helpers */
2463 uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
2464 {
2465 CPU_DoubleU u1, u2;
2466 u1.ll = op1;
2467 u2.ll = op2;
2468 return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2469 }
2470
2471 uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
2472 {
2473 CPU_DoubleU u1, u2;
2474 u1.ll = op1;
2475 u2.ll = op2;
2476 return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
2477 }
2478
2479 uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
2480 {
2481 CPU_DoubleU u1, u2;
2482 u1.ll = op1;
2483 u2.ll = op2;
2484 return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
2485 }
2486
2487 uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
2488 {
2489 /* XXX: TODO: test special values (NaN, infinites, ...) */
2490 return helper_efdtstlt(op1, op2);
2491 }
2492
2493 uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
2494 {
2495 /* XXX: TODO: test special values (NaN, infinites, ...) */
2496 return helper_efdtstgt(op1, op2);
2497 }
2498
2499 uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
2500 {
2501 /* XXX: TODO: test special values (NaN, infinites, ...) */
2502 return helper_efdtsteq(op1, op2);
2503 }
2504
2505 /*****************************************************************************/
2506 /* Softmmu support */
2507 #if !defined (CONFIG_USER_ONLY)
2508
2509 #define MMUSUFFIX _mmu
2510
2511 #define SHIFT 0
2512 #include "softmmu_template.h"
2513
2514 #define SHIFT 1
2515 #include "softmmu_template.h"
2516
2517 #define SHIFT 2
2518 #include "softmmu_template.h"
2519
2520 #define SHIFT 3
2521 #include "softmmu_template.h"
2522
2523 /* try to fill the TLB and return an exception if error. If retaddr is
2524 NULL, it means that the function was called in C code (i.e. not
2525 from generated code or from helper.c) */
2526 /* XXX: fix it to restore all registers */
2527 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2528 {
2529 TranslationBlock *tb;
2530 CPUState *saved_env;
2531 unsigned long pc;
2532 int ret;
2533
2534 /* XXX: hack to restore env in all cases, even if not called from
2535 generated code */
2536 saved_env = env;
2537 env = cpu_single_env;
2538 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2539 if (unlikely(ret != 0)) {
2540 if (likely(retaddr)) {
2541 /* now we have a real cpu fault */
2542 pc = (unsigned long)retaddr;
2543 tb = tb_find_pc(pc);
2544 if (likely(tb)) {
2545 /* the PC is inside the translated code. It means that we have
2546 a virtual CPU fault */
2547 cpu_restore_state(tb, env, pc, NULL);
2548 }
2549 }
2550 raise_exception_err(env, env->exception_index, env->error_code);
2551 }
2552 env = saved_env;
2553 }
2554
2555 /* Segment registers load and store */
2556 target_ulong helper_load_sr (target_ulong sr_num)
2557 {
2558 return env->sr[sr_num];
2559 }
2560
2561 void helper_store_sr (target_ulong sr_num, target_ulong val)
2562 {
2563 do_store_sr(env, sr_num, val);
2564 }
2565
2566 /* SLB management */
2567 #if defined(TARGET_PPC64)
2568 target_ulong helper_load_slb (target_ulong slb_nr)
2569 {
2570 return ppc_load_slb(env, slb_nr);
2571 }
2572
2573 void helper_store_slb (target_ulong slb_nr, target_ulong rs)
2574 {
2575 ppc_store_slb(env, slb_nr, rs);
2576 }
2577
2578 void helper_slbia (void)
2579 {
2580 ppc_slb_invalidate_all(env);
2581 }
2582
2583 void helper_slbie (target_ulong addr)
2584 {
2585 ppc_slb_invalidate_one(env, addr);
2586 }
2587
2588 #endif /* defined(TARGET_PPC64) */
2589
2590 /* TLB management */
2591 void helper_tlbia (void)
2592 {
2593 ppc_tlb_invalidate_all(env);
2594 }
2595
2596 void helper_tlbie (target_ulong addr)
2597 {
2598 ppc_tlb_invalidate_one(env, addr);
2599 }
2600
2601 /* Software driven TLBs management */
2602 /* PowerPC 602/603 software TLB load instructions helpers */
2603 static void do_6xx_tlb (target_ulong new_EPN, int is_code)
2604 {
2605 target_ulong RPN, CMP, EPN;
2606 int way;
2607
2608 RPN = env->spr[SPR_RPA];
2609 if (is_code) {
2610 CMP = env->spr[SPR_ICMP];
2611 EPN = env->spr[SPR_IMISS];
2612 } else {
2613 CMP = env->spr[SPR_DCMP];
2614 EPN = env->spr[SPR_DMISS];
2615 }
2616 way = (env->spr[SPR_SRR1] >> 17) & 1;
2617 #if defined (DEBUG_SOFTWARE_TLB)
2618 if (loglevel != 0) {
2619 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2620 " PTE1 " ADDRX " way %d\n",
2621 __func__, T0, EPN, CMP, RPN, way);
2622 }
2623 #endif
2624 /* Store this TLB */
2625 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2626 way, is_code, CMP, RPN);
2627 }
2628
2629 void helper_6xx_tlbd (target_ulong EPN)
2630 {
2631 do_6xx_tlb(EPN, 0);
2632 }
2633
2634 void helper_6xx_tlbi (target_ulong EPN)
2635 {
2636 do_6xx_tlb(EPN, 1);
2637 }
2638
2639 /* PowerPC 74xx software TLB load instructions helpers */
2640 static void do_74xx_tlb (target_ulong new_EPN, int is_code)
2641 {
2642 target_ulong RPN, CMP, EPN;
2643 int way;
2644
2645 RPN = env->spr[SPR_PTELO];
2646 CMP = env->spr[SPR_PTEHI];
2647 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2648 way = env->spr[SPR_TLBMISS] & 0x3;
2649 #if defined (DEBUG_SOFTWARE_TLB)
2650 if (loglevel != 0) {
2651 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2652 " PTE1 " ADDRX " way %d\n",
2653 __func__, T0, EPN, CMP, RPN, way);
2654 }
2655 #endif
2656 /* Store this TLB */
2657 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
2658 way, is_code, CMP, RPN);
2659 }
2660
2661 void helper_74xx_tlbd (target_ulong EPN)
2662 {
2663 do_74xx_tlb(EPN, 0);
2664 }
2665
2666 void helper_74xx_tlbi (target_ulong EPN)
2667 {
2668 do_74xx_tlb(EPN, 1);
2669 }
2670
2671 static always_inline target_ulong booke_tlb_to_page_size (int size)
2672 {
2673 return 1024 << (2 * size);
2674 }
2675
2676 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2677 {
2678 int size;
2679
2680 switch (page_size) {
2681 case 0x00000400UL:
2682 size = 0x0;
2683 break;
2684 case 0x00001000UL:
2685 size = 0x1;
2686 break;
2687 case 0x00004000UL:
2688 size = 0x2;
2689 break;
2690 case 0x00010000UL:
2691 size = 0x3;
2692 break;
2693 case 0x00040000UL:
2694 size = 0x4;
2695 break;
2696 case 0x00100000UL:
2697 size = 0x5;
2698 break;
2699 case 0x00400000UL:
2700 size = 0x6;
2701 break;
2702 case 0x01000000UL:
2703 size = 0x7;
2704 break;
2705 case 0x04000000UL:
2706 size = 0x8;
2707 break;
2708 case 0x10000000UL:
2709 size = 0x9;
2710 break;
2711 case 0x40000000UL:
2712 size = 0xA;
2713 break;
2714 #if defined (TARGET_PPC64)
2715 case 0x000100000000ULL:
2716 size = 0xB;
2717 break;
2718 case 0x000400000000ULL:
2719 size = 0xC;
2720 break;
2721 case 0x001000000000ULL:
2722 size = 0xD;
2723 break;
2724 case 0x004000000000ULL:
2725 size = 0xE;
2726 break;
2727 case 0x010000000000ULL:
2728 size = 0xF;
2729 break;
2730 #endif
2731 default:
2732 size = -1;
2733 break;
2734 }
2735
2736 return size;
2737 }
2738
2739 /* Helpers for 4xx TLB management */
2740 target_ulong helper_4xx_tlbre_lo (target_ulong entry)
2741 {
2742 ppcemb_tlb_t *tlb;
2743 target_ulong ret;
2744 int size;
2745
2746 entry &= 0x3F;
2747 tlb = &env->tlb[entry].tlbe;
2748 ret = tlb->EPN;
2749 if (tlb->prot & PAGE_VALID)
2750 ret |= 0x400;
2751 size = booke_page_size_to_tlb(tlb->size);
2752 if (size < 0 || size > 0x7)
2753 size = 1;
2754 ret |= size << 7;
2755 env->spr[SPR_40x_PID] = tlb->PID;
2756 return ret;
2757 }
2758
2759 target_ulong helper_4xx_tlbre_hi (target_ulong entry)
2760 {
2761 ppcemb_tlb_t *tlb;
2762 target_ulong ret;
2763
2764 entry &= 0x3F;
2765 tlb = &env->tlb[entry].tlbe;
2766 ret = tlb->RPN;
2767 if (tlb->prot & PAGE_EXEC)
2768 ret |= 0x200;
2769 if (tlb->prot & PAGE_WRITE)
2770 ret |= 0x100;
2771 return ret;
2772 }
2773
2774 void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
2775 {
2776 ppcemb_tlb_t *tlb;
2777 target_ulong page, end;
2778
2779 #if defined (DEBUG_SOFTWARE_TLB)
2780 if (loglevel != 0) {
2781 fprintf(logfile, "%s entry " TDX " val " TDX "\n", __func__, entry, val);
2782 }
2783 #endif
2784 entry &= 0x3F;
2785 tlb = &env->tlb[entry].tlbe;
2786 /* Invalidate previous TLB (if it's valid) */
2787 if (tlb->prot & PAGE_VALID) {
2788 end = tlb->EPN + tlb->size;
2789 #if defined (DEBUG_SOFTWARE_TLB)
2790 if (loglevel != 0) {
2791 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2792 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
2793 }
2794 #endif
2795 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2796 tlb_flush_page(env, page);
2797 }
2798 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
2799 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2800 * If this ever occurs, one should use the ppcemb target instead
2801 * of the ppc or ppc64 one
2802 */
2803 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2804 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2805 "are not supported (%d)\n",
2806 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
2807 }
2808 tlb->EPN = val & ~(tlb->size - 1);
2809 if (val & 0x40)
2810 tlb->prot |= PAGE_VALID;
2811 else
2812 tlb->prot &= ~PAGE_VALID;
2813 if (val & 0x20) {
2814 /* XXX: TO BE FIXED */
2815 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
2816 }
2817 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2818 tlb->attr = val & 0xFF;
2819 #if defined (DEBUG_SOFTWARE_TLB)
2820 if (loglevel != 0) {
2821 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2822 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2823 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2824 tlb->prot & PAGE_READ ? 'r' : '-',
2825 tlb->prot & PAGE_WRITE ? 'w' : '-',
2826 tlb->prot & PAGE_EXEC ? 'x' : '-',
2827 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2828 }
2829 #endif
2830 /* Invalidate new TLB (if valid) */
2831 if (tlb->prot & PAGE_VALID) {
2832 end = tlb->EPN + tlb->size;
2833 #if defined (DEBUG_SOFTWARE_TLB)
2834 if (loglevel != 0) {
2835 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2836 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2837 }
2838 #endif
2839 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2840 tlb_flush_page(env, page);
2841 }
2842 }
2843
2844 void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
2845 {
2846 ppcemb_tlb_t *tlb;
2847
2848 #if defined (DEBUG_SOFTWARE_TLB)
2849 if (loglevel != 0) {
2850 fprintf(logfile, "%s entry " TDX " val " TDX "\n", __func__, entry, val);
2851 }
2852 #endif
2853 entry &= 0x3F;
2854 tlb = &env->tlb[entry].tlbe;
2855 tlb->RPN = val & 0xFFFFFC00;
2856 tlb->prot = PAGE_READ;
2857 if (val & 0x200)
2858 tlb->prot |= PAGE_EXEC;
2859 if (val & 0x100)
2860 tlb->prot |= PAGE_WRITE;
2861 #if defined (DEBUG_SOFTWARE_TLB)
2862 if (loglevel != 0) {
2863 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2864 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2865 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
2866 tlb->prot & PAGE_READ ? 'r' : '-',
2867 tlb->prot & PAGE_WRITE ? 'w' : '-',
2868 tlb->prot & PAGE_EXEC ? 'x' : '-',
2869 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2870 }
2871 #endif
2872 }
2873
2874 target_ulong helper_4xx_tlbsx (target_ulong address)
2875 {
2876 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
2877 }
2878
2879 /* PowerPC 440 TLB management */
2880 void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
2881 {
2882 ppcemb_tlb_t *tlb;
2883 target_ulong EPN, RPN, size;
2884 int do_flush_tlbs;
2885
2886 #if defined (DEBUG_SOFTWARE_TLB)
2887 if (loglevel != 0) {
2888 fprintf(logfile, "%s word %d entry " TDX " value " TDX "\n",
2889 __func__, word, entry, value);
2890 }
2891 #endif
2892 do_flush_tlbs = 0;
2893 entry &= 0x3F;
2894 tlb = &env->tlb[entry].tlbe;
2895 switch (word) {
2896 default:
2897 /* Just here to please gcc */
2898 case 0:
2899 EPN = value & 0xFFFFFC00;
2900 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2901 do_flush_tlbs = 1;
2902 tlb->EPN = EPN;
2903 size = booke_tlb_to_page_size((value >> 4) & 0xF);
2904 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
2905 do_flush_tlbs = 1;
2906 tlb->size = size;
2907 tlb->attr &= ~0x1;
2908 tlb->attr |= (value >> 8) & 1;
2909 if (value & 0x200) {
2910 tlb->prot |= PAGE_VALID;
2911 } else {
2912 if (tlb->prot & PAGE_VALID) {
2913 tlb->prot &= ~PAGE_VALID;
2914 do_flush_tlbs = 1;
2915 }
2916 }
2917 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2918 if (do_flush_tlbs)
2919 tlb_flush(env, 1);
2920 break;
2921 case 1:
2922 RPN = value & 0xFFFFFC0F;
2923 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
2924 tlb_flush(env, 1);
2925 tlb->RPN = RPN;
2926 break;
2927 case 2:
2928 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
2929 tlb->prot = tlb->prot & PAGE_VALID;
2930 if (value & 0x1)
2931 tlb->prot |= PAGE_READ << 4;
2932 if (value & 0x2)
2933 tlb->prot |= PAGE_WRITE << 4;
2934 if (value & 0x4)
2935 tlb->prot |= PAGE_EXEC << 4;
2936 if (value & 0x8)
2937 tlb->prot |= PAGE_READ;
2938 if (value & 0x10)
2939 tlb->prot |= PAGE_WRITE;
2940 if (value & 0x20)
2941 tlb->prot |= PAGE_EXEC;
2942 break;
2943 }
2944 }
2945
2946 target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
2947 {
2948 ppcemb_tlb_t *tlb;
2949 target_ulong ret;
2950 int size;
2951
2952 entry &= 0x3F;
2953 tlb = &env->tlb[entry].tlbe;
2954 switch (word) {
2955 default:
2956 /* Just here to please gcc */
2957 case 0:
2958 ret = tlb->EPN;
2959 size = booke_page_size_to_tlb(tlb->size);
2960 if (size < 0 || size > 0xF)
2961 size = 1;
2962 ret |= size << 4;
2963 if (tlb->attr & 0x1)
2964 ret |= 0x100;
2965 if (tlb->prot & PAGE_VALID)
2966 ret |= 0x200;
2967 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2968 env->spr[SPR_440_MMUCR] |= tlb->PID;
2969 break;
2970 case 1:
2971 ret = tlb->RPN;
2972 break;
2973 case 2:
2974 ret = tlb->attr & ~0x1;
2975 if (tlb->prot & (PAGE_READ << 4))
2976 ret |= 0x1;
2977 if (tlb->prot & (PAGE_WRITE << 4))
2978 ret |= 0x2;
2979 if (tlb->prot & (PAGE_EXEC << 4))
2980 ret |= 0x4;
2981 if (tlb->prot & PAGE_READ)
2982 ret |= 0x8;
2983 if (tlb->prot & PAGE_WRITE)
2984 ret |= 0x10;
2985 if (tlb->prot & PAGE_EXEC)
2986 ret |= 0x20;
2987 break;
2988 }
2989 return ret;
2990 }
2991
2992 target_ulong helper_440_tlbsx (target_ulong address)
2993 {
2994 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
2995 }
2996
2997 #endif /* !CONFIG_USER_ONLY */