]> git.proxmox.com Git - qemu.git/blob - target-ppc/op_helper.c
Add vupk{h,l}px instructions.
[qemu.git] / target-ppc / op_helper.c
1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20 #include <string.h>
21 #include "exec.h"
22 #include "host-utils.h"
23 #include "helper.h"
24
25 #include "helper_regs.h"
26
27 //#define DEBUG_OP
28 //#define DEBUG_EXCEPTIONS
29 //#define DEBUG_SOFTWARE_TLB
30
31 /*****************************************************************************/
32 /* Exceptions processing helpers */
33
/* Raise a PowerPC exception: record the exception number and the
 * associated error code in the CPU state, then longjmp back to the
 * main CPU loop via cpu_loop_exit().  Never returns. */
void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}
43
/* Raise an exception that carries no error code. */
void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}
48
49 /*****************************************************************************/
50 /* Registers load and stores */
51 target_ulong helper_load_cr (void)
52 {
53 return (env->crf[0] << 28) |
54 (env->crf[1] << 24) |
55 (env->crf[2] << 20) |
56 (env->crf[3] << 16) |
57 (env->crf[4] << 12) |
58 (env->crf[5] << 8) |
59 (env->crf[6] << 4) |
60 (env->crf[7] << 0);
61 }
62
63 void helper_store_cr (target_ulong val, uint32_t mask)
64 {
65 int i, sh;
66
67 for (i = 0, sh = 7; i < 8; i++, sh--) {
68 if (mask & (1 << sh))
69 env->crf[i] = (val >> (sh * 4)) & 0xFUL;
70 }
71 }
72
73 /*****************************************************************************/
74 /* SPR accesses */
/* Debug helper: log an SPR read (the value is already in env->spr[sprn]). */
void helper_load_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}

/* Debug helper: log an SPR write (the value is already stored in env->spr[sprn]). */
void helper_store_dump_spr (uint32_t sprn)
{
    if (loglevel != 0) {
        fprintf(logfile, "Write SPR %d %03x <= " ADDRX "\n",
                sprn, sprn, env->spr[sprn]);
    }
}
90
/* Time-base, alternate time-base and 601 RTC read accessors: thin
 * wrappers that delegate to the cpu_ppc_* backends with the global env. */
target_ulong helper_load_tbl (void)
{
    return cpu_ppc_load_tbl(env);
}

target_ulong helper_load_tbu (void)
{
    return cpu_ppc_load_tbu(env);
}

target_ulong helper_load_atbl (void)
{
    return cpu_ppc_load_atbl(env);
}

target_ulong helper_load_atbu (void)
{
    return cpu_ppc_load_atbu(env);
}

target_ulong helper_load_601_rtcl (void)
{
    return cpu_ppc601_load_rtcl(env);
}

target_ulong helper_load_601_rtcu (void)
{
    return cpu_ppc601_load_rtcu(env);
}
120
#if !defined(CONFIG_USER_ONLY)
/* Privileged SPR write accessors: thin wrappers around the ppc_store_* /
 * cpu_ppc_store_* backends; only built for system emulation. */
#if defined (TARGET_PPC64)
void helper_store_asr (target_ulong val)
{
    ppc_store_asr(env, val);
}
#endif

void helper_store_sdr1 (target_ulong val)
{
    ppc_store_sdr1(env, val);
}

void helper_store_tbl (target_ulong val)
{
    cpu_ppc_store_tbl(env, val);
}

void helper_store_tbu (target_ulong val)
{
    cpu_ppc_store_tbu(env, val);
}

void helper_store_atbl (target_ulong val)
{
    cpu_ppc_store_atbl(env, val);
}

void helper_store_atbu (target_ulong val)
{
    cpu_ppc_store_atbu(env, val);
}

void helper_store_601_rtcl (target_ulong val)
{
    cpu_ppc601_store_rtcl(env, val);
}

void helper_store_601_rtcu (target_ulong val)
{
    cpu_ppc601_store_rtcu(env, val);
}

/* Decrementer read/write. */
target_ulong helper_load_decr (void)
{
    return cpu_ppc_load_decr(env);
}

void helper_store_decr (target_ulong val)
{
    cpu_ppc_store_decr(env, val);
}
173
/* Write HID0 on the PowerPC 601.  Bit 0x8 selects little-endian mode
 * on this CPU; when that bit changes, mirror the new endianness into
 * the MSR-independent hflags so translated code sees it.  Only the
 * low 32 bits of val are stored. */
void helper_store_hid0_601 (target_ulong val)
{
    target_ulong hid0;

    hid0 = env->spr[SPR_HID0];
    if ((val ^ hid0) & 0x00000008) {
        /* Change current endianness */
        env->hflags &= ~(1 << MSR_LE);
        env->hflags_nmsr &= ~(1 << MSR_LE);
        /* Copy HID0 bit 3 into the non-MSR LE hflag, then fold it in. */
        env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
        env->hflags |= env->hflags_nmsr;
        if (loglevel != 0) {
            fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
                    __func__, val & 0x8 ? 'l' : 'b', env->hflags);
        }
    }
    env->spr[SPR_HID0] = (uint32_t)val;
}
192
/* Write a PowerPC 403 protection bound register.  A change invalidates
 * all cached translations; the full flush is intentionally coarse. */
void helper_store_403_pbr (uint32_t num, target_ulong value)
{
    if (likely(env->pb[num] != value)) {
        env->pb[num] = value;
        /* Should be optimized */
        tlb_flush(env, 1);
    }
}
201
/* 40x / BookE timer and debug register accessors. */
target_ulong helper_load_40x_pit (void)
{
    return load_40x_pit(env);
}

void helper_store_40x_pit (target_ulong val)
{
    store_40x_pit(env, val);
}

void helper_store_40x_dbcr0 (target_ulong val)
{
    store_40x_dbcr0(env, val);
}

void helper_store_40x_sler (target_ulong val)
{
    store_40x_sler(env, val);
}

void helper_store_booke_tcr (target_ulong val)
{
    store_booke_tcr(env, val);
}

void helper_store_booke_tsr (target_ulong val)
{
    store_booke_tsr(env, val);
}

/* BAT register write accessors; nr selects the BAT pair. */
void helper_store_ibatu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu(env, nr, val);
}

void helper_store_ibatl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl(env, nr, val);
}

void helper_store_dbatu (uint32_t nr, target_ulong val)
{
    ppc_store_dbatu(env, nr, val);
}

void helper_store_dbatl (uint32_t nr, target_ulong val)
{
    ppc_store_dbatl(env, nr, val);
}

/* The 601 has unified BATs, hence the ibat backends for both helpers. */
void helper_store_601_batl (uint32_t nr, target_ulong val)
{
    ppc_store_ibatl_601(env, nr, val);
}

void helper_store_601_batu (uint32_t nr, target_ulong val)
{
    ppc_store_ibatu_601(env, nr, val);
}
#endif /* !CONFIG_USER_ONLY */
262
263 /*****************************************************************************/
264 /* Memory load and stores */
265
/* Compute addr + arg with the effective-address wrap required by the
 * current mode: on a 64-bit CPU running in 32-bit mode (MSR[SF] clear)
 * the result is truncated to 32 bits. */
static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
{
#if defined(TARGET_PPC64)
    if (!msr_sf)
        return (uint32_t)(addr + arg);
    else
#endif
        return addr + arg;
}
275
276 void helper_lmw (target_ulong addr, uint32_t reg)
277 {
278 for (; reg < 32; reg++) {
279 if (msr_le)
280 env->gpr[reg] = bswap32(ldl(addr));
281 else
282 env->gpr[reg] = ldl(addr);
283 addr = addr_add(addr, 4);
284 }
285 }
286
287 void helper_stmw (target_ulong addr, uint32_t reg)
288 {
289 for (; reg < 32; reg++) {
290 if (msr_le)
291 stl(addr, bswap32((uint32_t)env->gpr[reg]));
292 else
293 stl(addr, (uint32_t)env->gpr[reg]);
294 addr = addr_add(addr, 4);
295 }
296 }
297
298 void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
299 {
300 int sh;
301 for (; nb > 3; nb -= 4) {
302 env->gpr[reg] = ldl(addr);
303 reg = (reg + 1) % 32;
304 addr = addr_add(addr, 4);
305 }
306 if (unlikely(nb > 0)) {
307 env->gpr[reg] = 0;
308 for (sh = 24; nb > 0; nb--, sh -= 8) {
309 env->gpr[reg] |= ldub(addr) << sh;
310 addr = addr_add(addr, 1);
311 }
312 }
313 }
/* PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
/* lswx: load string word indexed.  The byte count comes from XER[BC];
 * a count of zero is a no-op.  Per the PPC32 spec, an invalid-form
 * program exception is raised when the destination register range
 * overlaps rA (if rA != 0) or rB.
 * NOTE(review): the overlap test does not account for reg + xer_bc
 * wrapping past r31 -- verify against the architecture requirement. */
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
    if (likely(xer_bc != 0)) {
        if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
                     (reg < rb && (reg + xer_bc) > rb))) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_INVAL |
                                       POWERPC_EXCP_INVAL_LSWX);
        } else {
            helper_lsw(addr, xer_bc, reg);
        }
    }
}
332
333 void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
334 {
335 int sh;
336 for (; nb > 3; nb -= 4) {
337 stl(addr, env->gpr[reg]);
338 reg = (reg + 1) % 32;
339 addr = addr_add(addr, 4);
340 }
341 if (unlikely(nb > 0)) {
342 for (sh = 24; nb > 0; nb--, sh -= 8) {
343 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
344 addr = addr_add(addr, 1);
345 }
346 }
347 }
348
349 static void do_dcbz(target_ulong addr, int dcache_line_size)
350 {
351 addr &= ~(dcache_line_size - 1);
352 int i;
353 for (i = 0 ; i < dcache_line_size ; i += 4) {
354 stl(addr + i , 0);
355 }
356 if (env->reserve == addr)
357 env->reserve = (target_ulong)-1ULL;
358 }
359
/* dcbz: zero a data cache line of the configured line size. */
void helper_dcbz(target_ulong addr)
{
    do_dcbz(addr, env->dcache_line_size);
}

/* dcbz on the PowerPC 970: when the two HID5 bits at position 7..8
 * equal 1, the legacy 32-byte dcbz size is used instead of the
 * configured cache line size. */
void helper_dcbz_970(target_ulong addr)
{
    if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
        do_dcbz(addr, 32);
    else
        do_dcbz(addr, env->dcache_line_size);
}
372
373 void helper_icbi(target_ulong addr)
374 {
375 uint32_t tmp;
376
377 addr &= ~(env->dcache_line_size - 1);
378 /* Invalidate one cache line :
379 * PowerPC specification says this is to be treated like a load
380 * (not a fetch) by the MMU. To be sure it will be so,
381 * do the load "by hand".
382 */
383 tmp = ldl(addr);
384 tb_invalidate_page_range(addr, addr + env->icache_line_size);
385 }
386
387 // XXX: to be tested
388 target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
389 {
390 int i, c, d;
391 d = 24;
392 for (i = 0; i < xer_bc; i++) {
393 c = ldub(addr);
394 addr = addr_add(addr, 1);
395 /* ra (if not 0) and rb are never modified */
396 if (likely(reg != rb && (ra == 0 || reg != ra))) {
397 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
398 }
399 if (unlikely(c == xer_cmp))
400 break;
401 if (likely(d != 0)) {
402 d -= 8;
403 } else {
404 d = 24;
405 reg++;
406 reg = reg & 0x1F;
407 }
408 }
409 return i;
410 }
411
412 /*****************************************************************************/
413 /* Fixed point operations helpers */
#if defined(TARGET_PPC64)

/* multiply high word */
/* mulhd: signed 64x64 multiply, return the high 64 bits. */
uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    muls64(&tl, &th, arg1, arg2);
    return th;
}

/* multiply high word unsigned */
/* mulhdu: unsigned 64x64 multiply, return the high 64 bits. */
uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, arg1, arg2);
    return th;
}

/* mulldo: signed 64x64 multiply returning the low 64 bits, setting
 * XER[OV] and XER[SO] when the upper 64 bits of the product are
 * neither all zeros nor all ones. */
uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->xer &= ~(1 << XER_OV);
    } else {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
    }
    return (int64_t)tl;
}
#endif
449
/* cntlzw: count leading zeros of the low 32 bits. */
target_ulong helper_cntlzw (target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the full 64-bit value. */
target_ulong helper_cntlzd (target_ulong t)
{
    return clz64(t);
}
#endif
461
462 /* shift right arithmetic helper */
463 target_ulong helper_sraw (target_ulong value, target_ulong shift)
464 {
465 int32_t ret;
466
467 if (likely(!(shift & 0x20))) {
468 if (likely((uint32_t)shift != 0)) {
469 shift &= 0x1f;
470 ret = (int32_t)value >> shift;
471 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
472 env->xer &= ~(1 << XER_CA);
473 } else {
474 env->xer |= (1 << XER_CA);
475 }
476 } else {
477 ret = (int32_t)value;
478 env->xer &= ~(1 << XER_CA);
479 }
480 } else {
481 ret = (int32_t)value >> 31;
482 if (ret) {
483 env->xer |= (1 << XER_CA);
484 } else {
485 env->xer &= ~(1 << XER_CA);
486 }
487 }
488 return (target_long)ret;
489 }
490
#if defined(TARGET_PPC64)
/* srad: shift right algebraic doubleword.  Returns the sign-extended
 * 64-bit result; XER[CA] is set when the source is negative and 1-bits
 * were shifted out (i.e. the result is inexact). */
target_ulong helper_srad (target_ulong value, target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            /* The lost-bits mask must be computed in 64-bit width:
             * with a plain int, 1 << shift is undefined behaviour for
             * shift >= 31 and yields a wrong carry for shifts 32..63. */
            if (likely(ret >= 0 ||
                       (value & (((uint64_t)1 << shift) - 1)) == 0)) {
                env->xer &= ~(1 << XER_CA);
            } else {
                env->xer |= (1 << XER_CA);
            }
        } else {
            ret = (int64_t)value;
            env->xer &= ~(1 << XER_CA);
        }
    } else {
        /* Shift amount >= 64: result is all sign bits; CA is set iff
         * the source was negative. */
        ret = (int64_t)value >> 63;
        if (ret) {
            env->xer |= (1 << XER_CA);
        } else {
            env->xer &= ~(1 << XER_CA);
        }
    }
    return ret;
}
#endif
520
521 target_ulong helper_popcntb (target_ulong val)
522 {
523 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
524 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
525 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
526 return val;
527 }
528
#if defined(TARGET_PPC64)
/* 64-bit variant of popcntb: per-byte population count over all
 * eight bytes. */
target_ulong helper_popcntb_64 (target_ulong val)
{
    target_ulong v;

    /* bit pairs -> 2-bit counts */
    v = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
    /* 2-bit counts -> nibble counts */
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
    /* nibble counts -> byte counts */
    v = (v & 0x0f0f0f0f0f0f0f0fULL) + ((v >> 4) & 0x0f0f0f0f0f0f0f0fULL);
    return v;
}
#endif
538
539 /*****************************************************************************/
540 /* Floating point operations helpers */
/* Widen a raw 32-bit float image to a 64-bit one via softfloat,
 * honouring env->fp_status. */
uint64_t helper_float32_to_float64(uint32_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    f.l = arg;
    d.d = float32_to_float64(f.f, &env->fp_status);
    return d.ll;
}

/* Narrow a raw 64-bit float image to a 32-bit one via softfloat,
 * honouring env->fp_status (rounding may occur). */
uint32_t helper_float64_to_float32(uint64_t arg)
{
    CPU_FloatU f;
    CPU_DoubleU d;
    d.ll = arg;
    f.f = float64_to_float32(d.d, &env->fp_status);
    return f.l;
}
558
/* Return non-zero when d has a biased exponent of zero, i.e. it is a
 * denormal -- or a zero, which also matches; callers are expected to
 * have tested for zero first. */
static always_inline int isden (float64 d)
{
    CPU_DoubleU u;

    u.d = d;

    return ((u.ll >> 52) & 0x7FF) == 0;
}
567
/* Compute the 5-bit FPSCR[FPRF] class/comparison code for arg:
 *   0x00 sNaN (flags undefined)   0x11 quiet NaN
 *   0x09 -infinity                0x05 +infinity
 *   0x12 -zero                    0x02 +zero
 *   0x18 -denormal                0x14 +denormal
 *   0x08 -normal                  0x04 +normal
 * When set_fprf is non-zero, FPSCR[FPRF] is updated in env->fpscr.
 * Returns the low 4 bits (FPCC) so the caller can update CR1. */
uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
{
    CPU_DoubleU farg;
    int isneg;
    int ret;
    farg.ll = arg;
    isneg = float64_is_neg(farg.d);
    if (unlikely(float64_is_nan(farg.d))) {
        if (float64_is_signaling_nan(farg.d)) {
            /* Signaling NaN: flags are undefined */
            ret = 0x00;
        } else {
            /* Quiet NaN */
            ret = 0x11;
        }
    } else if (unlikely(float64_is_infinity(farg.d))) {
        /* +/- infinity */
        if (isneg)
            ret = 0x09;
        else
            ret = 0x05;
    } else {
        if (float64_is_zero(farg.d)) {
            /* +/- zero */
            if (isneg)
                ret = 0x12;
            else
                ret = 0x02;
        } else {
            if (isden(farg.d)) {
                /* Denormalized numbers */
                ret = 0x10;
            } else {
                /* Normalized numbers */
                ret = 0x00;
            }
            if (isneg) {
                ret |= 0x08;
            } else {
                ret |= 0x04;
            }
        }
    }
    if (set_fprf) {
        /* We update FPSCR_FPRF */
        env->fpscr &= ~(0x1F << FPSCR_FPRF);
        env->fpscr |= ret << FPSCR_FPRF;
    }
    /* We just need fpcc to update Rc1 */
    return ret & 0xF;
}
619
/* Floating-point invalid operations exception */
/* Record an invalid-operation condition 'op' in FPSCR, update the VX /
 * FX / FEX summary bits, and either raise a program exception (when
 * the condition is enabled via FPSCR[VE] and MSR[FE0|FE1]) or return
 * the default quiet-NaN result the caller should store. */
static always_inline uint64_t fload_invalid_op_excp (int op)
{
    uint64_t ret = 0;
    int ve;

    ve = fpscr_ve;
    switch (op) {
    case POWERPC_EXCP_FP_VXSNAN:
        env->fpscr |= 1 << FPSCR_VXSNAN;
        break;
    case POWERPC_EXCP_FP_VXSOFT:
        env->fpscr |= 1 << FPSCR_VXSOFT;
        break;
    case POWERPC_EXCP_FP_VXISI:
        /* Magnitude subtraction of infinities */
        env->fpscr |= 1 << FPSCR_VXISI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIDI:
        /* Division of infinity by infinity */
        env->fpscr |= 1 << FPSCR_VXIDI;
        goto update_arith;
    case POWERPC_EXCP_FP_VXZDZ:
        /* Division of zero by zero */
        env->fpscr |= 1 << FPSCR_VXZDZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXIMZ:
        /* Multiplication of zero by infinity */
        env->fpscr |= 1 << FPSCR_VXIMZ;
        goto update_arith;
    case POWERPC_EXCP_FP_VXVC:
        /* Ordered comparison of NaN */
        env->fpscr |= 1 << FPSCR_VXVC;
        env->fpscr &= ~(0xF << FPSCR_FPCC);
        env->fpscr |= 0x11 << FPSCR_FPCC;
        /* We must update the target FPR before raising the exception */
        if (ve != 0) {
            env->exception_index = POWERPC_EXCP_PROGRAM;
            env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
            /* Update the floating-point enabled exception summary */
            env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
            ve = 0;
        }
        break;
    case POWERPC_EXCP_FP_VXSQRT:
        /* Square root of a negative number */
        env->fpscr |= 1 << FPSCR_VXSQRT;
    update_arith:
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    case POWERPC_EXCP_FP_VXCVI:
        /* Invalid conversion */
        env->fpscr |= 1 << FPSCR_VXCVI;
        env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
        if (ve == 0) {
            /* Set the result to quiet NaN */
            ret = 0xFFF8000000000000ULL;
            env->fpscr &= ~(0xF << FPSCR_FPCC);
            env->fpscr |= 0x11 << FPSCR_FPCC;
        }
        break;
    }
    /* Update the floating-point invalid operation summary */
    env->fpscr |= 1 << FPSCR_VX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (ve != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
    }
    return ret;
}
701
/* Record a divide-by-zero condition in FPSCR and raise a program
 * exception when it is enabled (FPSCR[ZE] and MSR[FE0|FE1]). */
static always_inline void float_zero_divide_excp (void)
{
    env->fpscr |= 1 << FPSCR_ZX;
    env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ze != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        if (msr_fe0 != 0 || msr_fe1 != 0) {
            helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                       POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
        }
    }
}

/* Record an overflow condition in FPSCR; when enabled the exception is
 * deferred via env->exception_index (target FPR updated first),
 * otherwise XX/FI are set as for an inexact result. */
static always_inline void float_overflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_OX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_oe != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
    } else {
        env->fpscr |= 1 << FPSCR_XX;
        env->fpscr |= 1 << FPSCR_FI;
    }
}

/* Record an underflow condition; when enabled the exception is
 * deferred via env->exception_index. */
static always_inline void float_underflow_excp (void)
{
    env->fpscr |= 1 << FPSCR_UX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_ue != 0) {
        /* XXX: should adjust the result */
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
    }
}

/* Record an inexact-result condition; when enabled the exception is
 * deferred via env->exception_index. */
static always_inline void float_inexact_excp (void)
{
    env->fpscr |= 1 << FPSCR_XX;
    /* Update the floating-point exception summary */
    env->fpscr |= 1 << FPSCR_FX;
    if (fpscr_xe != 0) {
        /* Update the floating-point enabled exception summary */
        env->fpscr |= 1 << FPSCR_FEX;
        /* We must update the target FPR before raising the exception */
        env->exception_index = POWERPC_EXCP_PROGRAM;
        env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
    }
}
764
/* Propagate the FPSCR[RN] rounding-mode field into softfloat's
 * fp_status so subsequent FP operations round accordingly. */
static always_inline void fpscr_set_rounding_mode (void)
{
    int rnd_type;

    /* Set rounding mode */
    switch (fpscr_rn) {
    case 0:
        /* Best approximation (round to nearest) */
        rnd_type = float_round_nearest_even;
        break;
    case 1:
        /* Smaller magnitude (round toward zero) */
        rnd_type = float_round_to_zero;
        break;
    case 2:
        /* Round toward +infinity */
        rnd_type = float_round_up;
        break;
    default:
    case 3:
        /* Round toward -infinity */
        rnd_type = float_round_down;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
}
791
/* Clear a single FPSCR bit (mtfsb0 and friends).  Only a change to the
 * rounding-mode bits needs extra work: softfloat's rounding mode is
 * resynchronized from FPSCR[RN]. */
void helper_fpscr_clrbit (uint32_t bit)
{
    int prev;

    prev = (env->fpscr >> bit) & 1;
    env->fpscr &= ~(1 << bit);
    if (prev == 1) {
        switch (bit) {
        case FPSCR_RN1:
        case FPSCR_RN:
            fpscr_set_rounding_mode();
            break;
        default:
            break;
        }
    }
}
809
810 void helper_fpscr_setbit (uint32_t bit)
811 {
812 int prev;
813
814 prev = (env->fpscr >> bit) & 1;
815 env->fpscr |= 1 << bit;
816 if (prev == 0) {
817 switch (bit) {
818 case FPSCR_VX:
819 env->fpscr |= 1 << FPSCR_FX;
820 if (fpscr_ve)
821 goto raise_ve;
822 case FPSCR_OX:
823 env->fpscr |= 1 << FPSCR_FX;
824 if (fpscr_oe)
825 goto raise_oe;
826 break;
827 case FPSCR_UX:
828 env->fpscr |= 1 << FPSCR_FX;
829 if (fpscr_ue)
830 goto raise_ue;
831 break;
832 case FPSCR_ZX:
833 env->fpscr |= 1 << FPSCR_FX;
834 if (fpscr_ze)
835 goto raise_ze;
836 break;
837 case FPSCR_XX:
838 env->fpscr |= 1 << FPSCR_FX;
839 if (fpscr_xe)
840 goto raise_xe;
841 break;
842 case FPSCR_VXSNAN:
843 case FPSCR_VXISI:
844 case FPSCR_VXIDI:
845 case FPSCR_VXZDZ:
846 case FPSCR_VXIMZ:
847 case FPSCR_VXVC:
848 case FPSCR_VXSOFT:
849 case FPSCR_VXSQRT:
850 case FPSCR_VXCVI:
851 env->fpscr |= 1 << FPSCR_VX;
852 env->fpscr |= 1 << FPSCR_FX;
853 if (fpscr_ve != 0)
854 goto raise_ve;
855 break;
856 case FPSCR_VE:
857 if (fpscr_vx != 0) {
858 raise_ve:
859 env->error_code = POWERPC_EXCP_FP;
860 if (fpscr_vxsnan)
861 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
862 if (fpscr_vxisi)
863 env->error_code |= POWERPC_EXCP_FP_VXISI;
864 if (fpscr_vxidi)
865 env->error_code |= POWERPC_EXCP_FP_VXIDI;
866 if (fpscr_vxzdz)
867 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
868 if (fpscr_vximz)
869 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
870 if (fpscr_vxvc)
871 env->error_code |= POWERPC_EXCP_FP_VXVC;
872 if (fpscr_vxsoft)
873 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
874 if (fpscr_vxsqrt)
875 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
876 if (fpscr_vxcvi)
877 env->error_code |= POWERPC_EXCP_FP_VXCVI;
878 goto raise_excp;
879 }
880 break;
881 case FPSCR_OE:
882 if (fpscr_ox != 0) {
883 raise_oe:
884 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
885 goto raise_excp;
886 }
887 break;
888 case FPSCR_UE:
889 if (fpscr_ux != 0) {
890 raise_ue:
891 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
892 goto raise_excp;
893 }
894 break;
895 case FPSCR_ZE:
896 if (fpscr_zx != 0) {
897 raise_ze:
898 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
899 goto raise_excp;
900 }
901 break;
902 case FPSCR_XE:
903 if (fpscr_xx != 0) {
904 raise_xe:
905 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
906 goto raise_excp;
907 }
908 break;
909 case FPSCR_RN1:
910 case FPSCR_RN:
911 fpscr_set_rounding_mode();
912 break;
913 default:
914 break;
915 raise_excp:
916 /* Update the floating-point enabled exception summary */
917 env->fpscr |= 1 << FPSCR_FEX;
918 /* We have to update Rc1 before raising the exception */
919 env->exception_index = POWERPC_EXCP_PROGRAM;
920 break;
921 }
922 }
923 }
924
/* mtfsf: write the FPSCR nibbles selected by the 8-bit field mask
 * (mask bit i selects nibble i).  FEX and VX (bits 0x60000000) are
 * summary bits derived below, so the incoming value cannot set them
 * directly.  Ends by resynchronizing softfloat's rounding mode. */
void helper_store_fpscr (uint64_t arg, uint32_t mask)
{
    /*
     * We use only the 32 LSB of the incoming fpr
     */
    uint32_t prev, new;
    int i;

    prev = env->fpscr;
    new = (uint32_t)arg;
    new &= ~0x60000000;
    new |= prev & 0x60000000;
    for (i = 0; i < 8; i++) {
        if (mask & (1 << i)) {
            env->fpscr &= ~(0xF << (4 * i));
            env->fpscr |= new & (0xF << (4 * i));
        }
    }
    /* Update VX and FEX */
    if (fpscr_ix != 0)
        env->fpscr |= 1 << FPSCR_VX;
    else
        env->fpscr &= ~(1 << FPSCR_VX);
    if ((fpscr_ex & fpscr_eex) != 0) {
        env->fpscr |= 1 << FPSCR_FEX;
        env->exception_index = POWERPC_EXCP_PROGRAM;
        /* XXX: we should compute it properly */
        env->error_code = POWERPC_EXCP_FP;
    }
    else
        env->fpscr &= ~(1 << FPSCR_FEX);
    fpscr_set_rounding_mode();
}
958
/* Called after an FP operation: raise any exception that was deferred
 * while the target FPR was updated and, with softfloat, translate the
 * accumulated softfloat flags into FPSCR exception conditions. */
void helper_float_check_status (void)
{
#ifdef CONFIG_SOFTFLOAT
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    } else {
        int status = get_float_exception_flags(&env->fp_status);
        if (status & float_flag_divbyzero) {
            float_zero_divide_excp();
        } else if (status & float_flag_overflow) {
            float_overflow_excp();
        } else if (status & float_flag_underflow) {
            float_underflow_excp();
        } else if (status & float_flag_inexact) {
            float_inexact_excp();
        }
    }
#else
    if (env->exception_index == POWERPC_EXCP_PROGRAM &&
        (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
        if (msr_fe0 != 0 || msr_fe1 != 0)
            helper_raise_exception_err(env->exception_index, env->error_code);
    }
#endif
}
988
#ifdef CONFIG_SOFTFLOAT
/* Clear softfloat's accumulated exception flags before an FP op so
 * helper_float_check_status sees only the flags that op raised. */
void helper_reset_fpstatus (void)
{
    set_float_exception_flags(0, &env->fp_status);
}
#endif
995
/* fadd - fadd. */
/* Double-precision add.  With USE_PRECISE_EMULATION the sNaN and
 * inf - inf (magnitude subtraction) invalid-operation cases are routed
 * through fload_invalid_op_excp so FPSCR is updated architecturally. */
uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN addition */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                        float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
        /* Magnitude subtraction of infinities */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
    } else {
        farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1020
/* fsub - fsub. */
/* Double-precision subtract.  With USE_PRECISE_EMULATION the sNaN and
 * same-signed inf - inf invalid-operation cases are routed through
 * fload_invalid_op_excp. */
uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    {
        if (unlikely(float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d))) {
            /* sNaN subtraction */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
        } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
                            float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
        }
    }
#else
    farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1047
/* fmul - fmul. */
/* Double-precision multiply.  With USE_PRECISE_EMULATION the sNaN and
 * zero * infinity invalid-operation cases are routed through
 * fload_invalid_op_excp. */
uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN multiplication */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1072
/* fdiv - fdiv. */
/* Double-precision divide.  With USE_PRECISE_EMULATION the sNaN,
 * inf / inf and 0 / 0 invalid-operation cases are routed through
 * fload_invalid_op_excp. */
uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
{
    CPU_DoubleU farg1, farg2;

    farg1.ll = arg1;
    farg2.ll = arg2;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d))) {
        /* sNaN division */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
        /* Division of infinity by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
    } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
        /* Division of zero by zero */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
    } else {
        farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
    }
#else
    farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
#endif
    return farg1.ll;
}
1099
/* fabs */
/* Clear the sign bit. */
uint64_t helper_fabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    return farg.ll;
}

/* fnabs */
/* Force the sign bit on (negative absolute value). */
uint64_t helper_fnabs (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_abs(farg.d);
    farg.d = float64_chs(farg.d);
    return farg.ll;
}

/* fneg */
/* Flip the sign bit. */
uint64_t helper_fneg (uint64_t arg)
{
    CPU_DoubleU farg;

    farg.ll = arg;
    farg.d = float64_chs(farg.d);
    return farg.ll;
}
1130
/* fctiw - fctiw. */
/* Convert double to 32-bit signed integer using the current rounding
 * mode; NaN and infinity inputs take the invalid-conversion path. */
uint64_t helper_fctiw (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         * to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}

/* fctiwz - fctiwz. */
/* Same as fctiw but always rounds toward zero. */
uint64_t helper_fctiwz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
#if USE_PRECISE_EMULATION
        /* XXX: higher bits are not supposed to be significant.
         * to make tests easier, return the same as a real PowerPC 750
         */
        farg.ll |= 0xFFF80000ULL << 32;
#endif
    }
    return farg.ll;
}
1178
#if defined(TARGET_PPC64)
/* fcfid - fcfid. */
/* Convert signed 64-bit integer to double. */
uint64_t helper_fcfid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.d = int64_to_float64(arg, &env->fp_status);
    return farg.ll;
}

/* fctid - fctid. */
/* Convert double to 64-bit signed integer using the current rounding
 * mode; NaN and infinity inputs take the invalid-conversion path. */
uint64_t helper_fctid (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64(farg.d, &env->fp_status);
    }
    return farg.ll;
}

/* fctidz - fctidz. */
/* Same as fctid but always rounds toward zero. */
uint64_t helper_fctidz (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity conversion */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
    }
    return farg.ll;
}

#endif
1225
/* Round a 64-bit float to an integral value in FP format, using the
 * given softfloat rounding mode; the FPSCR rounding mode is restored
 * afterwards.  Shared by the fri* helpers below.
 */
static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
    } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNan / infinity round */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
    } else {
        set_float_rounding_mode(rounding_mode, &env->fp_status);
        farg.ll = float64_round_to_int(farg.d, &env->fp_status);
        /* Restore rounding mode from FPSCR */
        fpscr_set_rounding_mode();
    }
    return farg.ll;
}
1245
/* frin: round to nearest (ties to even). */
uint64_t helper_frin (uint64_t arg)
{
    return do_fri(arg, float_round_nearest_even);
}

/* friz: round toward zero. */
uint64_t helper_friz (uint64_t arg)
{
    return do_fri(arg, float_round_to_zero);
}

/* frip: round toward +infinity. */
uint64_t helper_frip (uint64_t arg)
{
    return do_fri(arg, float_round_up);
}

/* frim: round toward -infinity. */
uint64_t helper_frim (uint64_t arg)
{
    return do_fri(arg, float_round_down);
}
1265
/* fmadd - fmadd. */
/* Fused multiply-add: (arg1 * arg2) + arg3 with a single rounding in
 * the precise-emulation FLOAT128 path (intermediate product kept in
 * 128-bit precision, as the PowerPC spec requires).
 */
uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
    }
#else
    /* Non-precise path: two separately rounded operations. */
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1312
/* fmsub - fmsub. */
/* Fused multiply-subtract: (arg1 * arg2) - arg3; see helper_fmadd for
 * the structure of the precise/imprecise paths.
 */
uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;
#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        /* == here (vs != in fmadd): inf - inf of the SAME sign is invalid */
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
    }
#else
    farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
    farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
    return farg1.ll;
}
1359
/* fnmadd - fnmadd. */
/* Negated fused multiply-add: -((arg1 * arg2) + arg3).  The sign change
 * is skipped for NaN results so the NaN payload is preserved.
 */
uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) + farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
#endif
        /* Negate the result unless it is a NaN */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1409
/* fnmsub - fnmsub. */
/* Negated fused multiply-subtract: -((arg1 * arg2) - arg3); see
 * helper_fnmadd for structure and the NaN-preserving sign change.
 */
uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
{
    CPU_DoubleU farg1, farg2, farg3;

    farg1.ll = arg1;
    farg2.ll = arg2;
    farg3.ll = arg3;

    if (unlikely(float64_is_signaling_nan(farg1.d) ||
                 float64_is_signaling_nan(farg2.d) ||
                 float64_is_signaling_nan(farg3.d))) {
        /* sNaN operation */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
                        (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
        /* Multiplication of zero by infinity */
        farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
    } else {
#if USE_PRECISE_EMULATION
#ifdef FLOAT128
        /* This is the way the PowerPC specification defines it */
        float128 ft0_128, ft1_128;

        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
        /* == here: subtracting an infinity of the same sign is invalid */
        if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
            /* Magnitude subtraction of infinities */
            farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
        } else {
            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
        }
#else
        /* This is OK on x86 hosts */
        farg1.d = (farg1.d * farg2.d) - farg3.d;
#endif
#else
        farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
        farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
#endif
        /* Negate the result unless it is a NaN */
        if (likely(!float64_is_nan(farg1.d)))
            farg1.d = float64_chs(farg1.d);
    }
    return farg1.ll;
}
1459
/* frsp - frsp. */
/* Round a 64-bit float to single precision by converting to float32 and
 * back, so the result is representable in single precision.
 */
uint64_t helper_frsp (uint64_t arg)
{
    CPU_DoubleU farg;
    float32 f32;
    farg.ll = arg;

#if USE_PRECISE_EMULATION
    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN rounding to single precision */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
#else
    f32 = float64_to_float32(farg.d, &env->fp_status);
    farg.d = float32_to_float64(f32, &env->fp_status);
#endif
    return farg.ll;
}
1481
/* fsqrt - fsqrt. */
/* Square root; negative nonzero inputs raise VXSQRT (sqrt(-0) is -0 and
 * is allowed through).
 */
uint64_t helper_fsqrt (uint64_t arg)
{
    CPU_DoubleU farg;
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
    }
    return farg.ll;
}
1499
1500 /* fre - fre. */
1501 uint64_t helper_fre (uint64_t arg)
1502 {
1503 CPU_DoubleU fone, farg;
1504 fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
1505 farg.ll = arg;
1506
1507 if (unlikely(float64_is_signaling_nan(farg.d))) {
1508 /* sNaN reciprocal */
1509 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1510 } else {
1511 farg.d = float64_div(fone.d, farg.d, &env->fp_status);
1512 }
1513 return farg.d;
1514 }
1515
/* fres - fres. */
/* Single-precision reciprocal estimate: exact 1.0 / arg, then rounded
 * to single precision via a float32 round trip.
 */
uint64_t helper_fres (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else {
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1534
/* frsqrte - frsqrte. */
/* Reciprocal square root estimate: exact 1.0 / sqrt(arg), rounded to
 * single precision.  Negative nonzero inputs raise VXSQRT.
 */
uint64_t helper_frsqrte (uint64_t arg)
{
    CPU_DoubleU fone, farg;
    float32 f32;
    fone.ll = 0x3FF0000000000000ULL; /* 1.0 */
    farg.ll = arg;

    if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN reciprocal square root */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
        /* Reciprocal square root of a negative nonzero number */
        farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
    } else {
        farg.d = float64_sqrt(farg.d, &env->fp_status);
        farg.d = float64_div(fone.d, farg.d, &env->fp_status);
        f32 = float64_to_float32(farg.d, &env->fp_status);
        farg.d = float32_to_float64(f32, &env->fp_status);
    }
    return farg.ll;
}
1557
1558 /* fsel - fsel. */
1559 uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
1560 {
1561 CPU_DoubleU farg1;
1562
1563 farg1.ll = arg1;
1564
1565 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
1566 return arg2;
1567 else
1568 return arg3;
1569 }
1570
/* fcmpu: unordered FP compare.  Sets the 4-bit condition (unordered=1,
 * lt=8, gt=4, eq=2) into FPSCR[FPRF] and CR field crfD; an sNaN operand
 * additionally raises VXSNAN.
 */
void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely(ret == 0x01UL
                 && (float64_is_signaling_nan(farg1.d) ||
                     float64_is_signaling_nan(farg2.d)))) {
        /* sNaN comparison */
        fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
    }
}
1599
/* fcmpo: ordered FP compare.  Like fcmpu, but any NaN operand also
 * raises VXVC (and VXSNAN too if the NaN is signaling).
 */
void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
{
    CPU_DoubleU farg1, farg2;
    uint32_t ret = 0;
    farg1.ll = arg1;
    farg2.ll = arg2;

    if (unlikely(float64_is_nan(farg1.d) ||
                 float64_is_nan(farg2.d))) {
        ret = 0x01UL;
    } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x08UL;
    } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
        ret = 0x04UL;
    } else {
        ret = 0x02UL;
    }

    env->fpscr &= ~(0x0F << FPSCR_FPRF);
    env->fpscr |= ret << FPSCR_FPRF;
    env->crf[crfD] = ret;
    if (unlikely (ret == 0x01UL)) {
        if (float64_is_signaling_nan(farg1.d) ||
            float64_is_signaling_nan(farg2.d)) {
            /* sNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
                                  POWERPC_EXCP_FP_VXVC);
        } else {
            /* qNaN comparison */
            fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
        }
    }
}
1633
1634 #if !defined (CONFIG_USER_ONLY)
/* Store to the MSR.  hreg_store_msr() returns a nonzero exception
 * number when the MSR change requires one (e.g. power management state
 * change); in that case flush the TB chain and raise it.
 */
void helper_store_msr (target_ulong val)
{
    val = hreg_store_msr(env, val, 0);
    if (val != 0) {
        env->interrupt_request |= CPU_INTERRUPT_EXITTB;
        helper_raise_exception(val);
    }
}
1643
/* Common return-from-interrupt tail: load NIP and MSR (masked by msrm)
 * from the given values.  On 64-bit targets, when returning to 32-bit
 * mode (MSR[SF] clear), the values are truncated to 32 bits; keep_msrh
 * preserves the current high MSR word in that case (used by rfi).
 */
static always_inline void do_rfi (target_ulong nip, target_ulong msr,
                                  target_ulong msrm, int keep_msrh)
{
#if defined(TARGET_PPC64)
    if (msr & (1ULL << MSR_SF)) {
        nip = (uint64_t)nip;
        msr &= (uint64_t)msrm;
    } else {
        nip = (uint32_t)nip;
        msr = (uint32_t)(msr & msrm);
        if (keep_msrh)
            msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
    }
#else
    nip = (uint32_t)nip;
    msr &= (uint32_t)msrm;
#endif
    /* XXX: beware: this is false if VLE is supported */
    env->nip = nip & ~((target_ulong)0x00000003);
    hreg_store_msr(env, msr, 1);
#if defined (DEBUG_OP)
    cpu_dump_rfi(env->nip, env->msr);
#endif
    /* No need to raise an exception here,
     * as rfi is always the last insn of a TB
     */
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}
1672
/* rfi: return from interrupt using SRR0/SRR1, keeping the high MSR word
 * when returning to 32-bit mode. */
void helper_rfi (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 1);
}
1678
#if defined(TARGET_PPC64)
/* rfid: 64-bit return from interrupt using SRR0/SRR1. */
void helper_rfid (void)
{
    do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
           ~((target_ulong)0xFFFF0000), 0);
}

/* hrfid: return from hypervisor interrupt using HSRR0/HSRR1. */
void helper_hrfid (void)
{
    do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
           ~((target_ulong)0xFFFF0000), 0);
}
#endif
1692 #endif
1693
1694 void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
1695 {
1696 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1697 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1698 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1699 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1700 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
1701 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1702 }
1703 }
1704
#if defined(TARGET_PPC64)
/* td: 64-bit trap doubleword; flags bits as in helper_tw. */
void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
{
    int trap;

    trap = ((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
           ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
           ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
           ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
           ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01));
    if (unlikely(trap)) {
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
    }
}
#endif
1716
1717 /*****************************************************************************/
1718 /* PowerPC 601 specific instructions (POWER bridge) */
1719
1720 target_ulong helper_clcs (uint32_t arg)
1721 {
1722 switch (arg) {
1723 case 0x0CUL:
1724 /* Instruction cache line size */
1725 return env->icache_line_size;
1726 break;
1727 case 0x0DUL:
1728 /* Data cache line size */
1729 return env->dcache_line_size;
1730 break;
1731 case 0x0EUL:
1732 /* Minimum cache line size */
1733 return (env->icache_line_size < env->dcache_line_size) ?
1734 env->icache_line_size : env->dcache_line_size;
1735 break;
1736 case 0x0FUL:
1737 /* Maximum cache line size */
1738 return (env->icache_line_size > env->dcache_line_size) ?
1739 env->icache_line_size : env->dcache_line_size;
1740 break;
1741 default:
1742 /* Undefined */
1743 return 0;
1744 break;
1745 }
1746 }
1747
/* POWER div: divide the 64-bit value formed by arg1:MQ by arg2; the
 * quotient is returned and the remainder goes to MQ.  Overflow and
 * divide-by-zero produce INT32_MIN with MQ cleared.
 * NOTE(review): tmp % arg2 and tmp / (int32_t)arg2 mix unsigned 64-bit
 * with sign-extended 32-bit operands — presumably this matches 601
 * bridge behaviour for negative operands; verify against hardware.
 */
target_ulong helper_div (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
1761
/* POWER divo: as helper_div, but also maintains XER[OV]/XER[SO].  OV is
 * set (sticky SO too) on divide-by-zero, INT32_MIN / -1, or when the
 * quotient does not fit in 32 bits; cleared otherwise.
 */
target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->xer |= (1 << XER_OV) | (1 << XER_SO);
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        /* Quotient fits in 32 bits iff sign-extending it is a no-op */
        if ((int32_t)tmp != tmp) {
            env->xer |= (1 << XER_OV) | (1 << XER_SO);
        } else {
            env->xer &= ~(1 << XER_OV);
        }
        return tmp;
    }
}
1782
1783 target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
1784 {
1785 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1786 (int32_t)arg2 == 0) {
1787 env->spr[SPR_MQ] = 0;
1788 return INT32_MIN;
1789 } else {
1790 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1791 return (int32_t)arg1 / (int32_t)arg2;
1792 }
1793 }
1794
1795 target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
1796 {
1797 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1798 (int32_t)arg2 == 0) {
1799 env->xer |= (1 << XER_OV) | (1 << XER_SO);
1800 env->spr[SPR_MQ] = 0;
1801 return INT32_MIN;
1802 } else {
1803 env->xer &= ~(1 << XER_OV);
1804 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1805 return (int32_t)arg1 / (int32_t)arg2;
1806 }
1807 }
1808
1809 #if !defined (CONFIG_USER_ONLY)
/* POWER rac: real address compute.  Translate addr through the MMU
 * (BATs temporarily disabled, see XXX) and return the physical address,
 * or 0 if the translation fails.
 */
target_ulong helper_rac (target_ulong addr)
{
    mmu_ctx_t ctx;
    int nb_BATs;
    target_ulong ret = 0;

    /* We don't have to generate many instances of this instruction,
     * as rac is supervisor only.
     */
    /* XXX: FIX THIS: Pretend we have no BAT */
    nb_BATs = env->nb_BATs;
    env->nb_BATs = 0;
    if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
        ret = ctx.raddr;
    env->nb_BATs = nb_BATs;
    return ret;
}
1827
/* POWER rfsvc: return from service call — NIP from LR, MSR from CTR. */
void helper_rfsvc (void)
{
    do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
}
1832 #endif
1833
1834 /*****************************************************************************/
1835 /* 602 specific instructions */
1836 /* mfrom is the most crazy instruction ever seen, imho ! */
1837 /* Real implementation uses a ROM table. Do the same */
1838 /* Extremly decomposed:
1839 * -arg / 256
1840 * return 256 * log10(10 + 1.0) + 0.5
1841 */
#if !defined (CONFIG_USER_ONLY)
/* 602 mfrom: table lookup (see mfrom_table.c, generated ROM image of
 * 256 * log10(10 + arg/256) + 0.5); out-of-range args return 0.
 */
target_ulong helper_602_mfrom (target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
1853
1854 /*****************************************************************************/
1855 /* Embedded PowerPC specific helpers */
1856
1857 /* XXX: to be improved to check access rights when in user-mode */
/* Read device control register dcrn.  Raises a program exception when
 * the CPU has no DCR environment or the read fails; returns 0 in the
 * error paths.
 */
target_ulong helper_load_dcr (target_ulong dcrn)
{
    target_ulong val = 0;

    if (unlikely(env->dcr_env == NULL)) {
        if (loglevel != 0) {
            fprintf(logfile, "No DCR environment\n");
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
        if (loglevel != 0) {
            fprintf(logfile, "DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
    return val;
}
1877
/* Write val to device control register dcrn; error handling mirrors
 * helper_load_dcr.
 */
void helper_store_dcr (target_ulong dcrn, target_ulong val)
{
    if (unlikely(env->dcr_env == NULL)) {
        if (loglevel != 0) {
            fprintf(logfile, "No DCR environment\n");
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
    } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
        if (loglevel != 0) {
            fprintf(logfile, "DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
        }
        helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
    }
}
1894
1895 #if !defined(CONFIG_USER_ONLY)
1896 void helper_40x_rfci (void)
1897 {
1898 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1899 ~((target_ulong)0xFFFF0000), 0);
1900 }
1901
1902 void helper_rfci (void)
1903 {
1904 do_rfi(env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
1905 ~((target_ulong)0x3FFF0000), 0);
1906 }
1907
1908 void helper_rfdi (void)
1909 {
1910 do_rfi(env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
1911 ~((target_ulong)0x3FFF0000), 0);
1912 }
1913
1914 void helper_rfmci (void)
1915 {
1916 do_rfi(env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
1917 ~((target_ulong)0x3FFF0000), 0);
1918 }
1919 #endif
1920
/* 440 specific */
/* dlmzb: determine leftmost zero byte in the 8-byte value high:low.
 * Returns the 1-based index of the first zero byte (or 8 if none, per
 * the fall-through), stores it in XER[0:6], and when update_Rc is set
 * records in CR0 which word contained it (0x4 = high, 0x8 = low,
 * 0x2 = none) plus the SO bit.
 */
target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the high word byte by byte, most significant first. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* Then the low word. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}
1956
1957 /*****************************************************************************/
1958 /* Altivec extension helpers */
/* Index of the most/least significant half of a 2-element pair in host
 * memory order. */
#if defined(WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over vector elements in PowerPC (big-endian) element order,
 * regardless of host endianness. */
#if defined(WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element) \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element) \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
1974
/* lvsl: load vector for shift left — fill r with the byte sequence
 * sh, sh+1, ... sh+15, where sh is the low 4 address bits. */
void helper_lvsl (ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
1983
/* lvsr: load vector for shift right — fill r with the byte sequence
 * 16-sh, 16-sh+1, ... 31-sh. */
void helper_lvsr (ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I (i, u8) {
        r->u8[i] = j++;
    }
}
1992
1993 void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1994 {
1995 int i;
1996 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1997 r->u32[i] = ~a->u32[i] < b->u32[i];
1998 }
1999 }
2000
/* Element-wise modulo arithmetic: defines vadd{u,b,h,w}m / vsub...m
 * helpers applying `op` to each element pair. */
#define VARITH_DO(name, op, element) \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        r->element[i] = a->element[i] op b->element[i]; \
    } \
}
#define VARITH(suffix, element) \
  VARITH_DO(add##suffix, +, element) \
  VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH
2017
/* Element-wise rounded average: (a + b + 1) >> 1 computed in a wider
 * type (`etype`) so the intermediate sum cannot overflow. */
#define VAVG_DO(name, element, etype) \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
        r->element[i] = x >> 1; \
    } \
}

#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
    VAVG_DO(avgs##type, signed_element, signed_type) \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
2036
/* Element-wise min/max: keep a's element unless the comparison against
 * b's element holds (> for min, < for max). */
#define VMINMAX_DO(name, compare, element) \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        if (a->element[i] compare b->element[i]) { \
            r->element[i] = b->element[i]; \
        } else { \
            r->element[i] = a->element[i]; \
        } \
    } \
}
#define VMINMAX(suffix, element) \
  VMINMAX_DO(min##suffix, >, element) \
  VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
2060
2061 #define VMRG_DO(name, element, highp) \
2062 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2063 { \
2064 ppc_avr_t result; \
2065 int i; \
2066 size_t n_elems = ARRAY_SIZE(r->element); \
2067 for (i = 0; i < n_elems/2; i++) { \
2068 if (highp) { \
2069 result.element[i*2+HI_IDX] = a->element[i]; \
2070 result.element[i*2+LO_IDX] = b->element[i]; \
2071 } else { \
2072 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2073 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2074 } \
2075 } \
2076 *r = result; \
2077 }
2078 #if defined(WORDS_BIGENDIAN)
2079 #define MRGHI 0
2080 #define MRGL0 1
2081 #else
2082 #define MRGHI 1
2083 #define MRGLO 0
2084 #endif
2085 #define VMRG(suffix, element) \
2086 VMRG_DO(mrgl##suffix, element, MRGHI) \
2087 VMRG_DO(mrgh##suffix, element, MRGLO)
2088 VMRG(b, u8)
2089 VMRG(h, u16)
2090 VMRG(w, u32)
2091 #undef VMRG_DO
2092 #undef VMRG
2093 #undef MRGHI
2094 #undef MRGLO
2095
/* vmule/vmulo: multiply even/odd-numbered elements of a and b into
 * double-width product elements (element order is PowerPC order via
 * VECTOR_FOR_INORDER_I). */
#define VMUL_DO(name, mul_element, prod_element, evenp) \
void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    VECTOR_FOR_INORDER_I(i, prod_element) { \
        if (evenp) { \
            r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
        } else { \
            r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
        } \
    } \
}
#define VMUL(suffix, mul_element, prod_element) \
  VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
  VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
2117
2118 #define VROTATE(suffix, element) \
2119 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2120 { \
2121 int i; \
2122 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2123 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2124 unsigned int shift = b->element[i] & mask; \
2125 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2126 } \
2127 }
2128 VROTATE(b, u8)
2129 VROTATE(h, u16)
2130 VROTATE(w, u32)
2131 #undef VROTATE
2132
/* vsl{b,h,w}: shift each element of a left by the count in the
 * corresponding element of b, masked to the element width. */
#define VSL(suffix, element) \
void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
        unsigned int shift = b->element[i] & mask; \
        r->element[i] = a->element[i] << shift; \
    } \
}
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
2147
/* vsldoi: shift the 32-byte concatenation a||b left by `shift` bytes
 * and keep the high 16 bytes.  The little-endian branch mirrors the
 * index arithmetic to account for reversed element order in memory.
 * A temporary is used so r may alias a or b.
 */
void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index-0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index-0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}
2175
/* vslo: shift the whole vector left by octets.  The byte count comes
 * from bits 121:124 of b, i.e. bits 3:6 of its last byte in PowerPC
 * order (b->u8[LO_IDX*0xf] selects that byte on either host
 * endianness); vacated bytes are zeroed.
 */
void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#else
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#endif
}
2188
/* Experimental testing shows that hardware masks the immediate. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* Element order is reversed in host memory on little-endian hosts. */
#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
#endif
/* vsplt{b,h,w}: replicate the element of b selected by the (masked)
 * splat immediate into every element of r. */
#define VSPLT(suffix, element) \
void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
{ \
    uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        r->element[i] = s; \
    } \
}
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
2211
/* vsr{a,}{b,h,w}: shift each element of a right by the count in the
 * corresponding element of b, masked to the element width.  The
 * signed (s8/s16/s32) instantiations give the arithmetic-shift vsra*
 * variants, relying on the compiler's signed right shift.
 */
#define VSR(suffix, element) \
void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
{ \
    int i; \
    for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
        unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
        unsigned int shift = b->element[i] & mask; \
        r->element[i] = a->element[i] >> shift; \
    } \
}
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
2229
/* vsro: shift the whole vector right by octets; the byte count comes
 * from the same field of b as in vslo, and vacated bytes are zeroed.
 */
void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined (WORDS_BIGENDIAN)
    memmove (&r->u8[sh], &a->u8[0], 16-sh);
    memset (&r->u8[0], 0, sh);
#else
    memmove (&r->u8[0], &a->u8[sh], 16-sh);
    memset (&r->u8[16-sh], 0, sh);
#endif
}
2242
2243 void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2244 {
2245 int i;
2246 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2247 r->u32[i] = a->u32[i] >= b->u32[i];
2248 }
2249 }
2250
2251 #if defined(WORDS_BIGENDIAN)
2252 #define UPKHI 1
2253 #define UPKLO 0
2254 #else
2255 #define UPKHI 0
2256 #define UPKLO 1
2257 #endif
2258 #define VUPKPX(suffix, hi) \
2259 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2260 { \
2261 int i; \
2262 ppc_avr_t result; \
2263 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
2264 uint16_t e = b->u16[hi ? i : i+4]; \
2265 uint8_t a = (e >> 15) ? 0xff : 0; \
2266 uint8_t r = (e >> 10) & 0x1f; \
2267 uint8_t g = (e >> 5) & 0x1f; \
2268 uint8_t b = e & 0x1f; \
2269 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
2270 } \
2271 *r = result; \
2272 }
2273 VUPKPX(lpx, UPKLO)
2274 VUPKPX(hpx, UPKHI)
2275 #undef VUPKPX
2276
2277 #undef UPKHI
2278 #undef UPKLO
2279
2280 #undef VECTOR_FOR_INORDER_I
2281 #undef HI_IDX
2282 #undef LO_IDX
2283
2284 /*****************************************************************************/
2285 /* SPE extension helpers */
2286 /* Use a table to make this quicker */
/* Use a table to make this quicker */
/* hbrev[n] is the 4-bit value n with its bit order reversed. */
static uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Reverse the order of the 8 bits within a byte (not a byte swap):
 * the reversed high nibble becomes the low nibble and vice versa. */
static always_inline uint8_t byte_reverse (uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

/* Reverse the order of the 32 bits within a word, by bit-reversing
 * each byte and swapping the byte positions. */
static always_inline uint32_t word_reverse (uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
2302
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
/* brinc: bit-reversed increment, used for FFT-style addressing.
 * The low MASKBITS bits of arg1 are incremented in bit-reversed order
 * (reverse, add 1, reverse back) under the mask taken from arg2; the
 * bits of arg1 above the mask pass through unchanged. */
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* (a | ~b) forces the bits outside the arg2 mask to carry through
     * during the bit-reversed increment. */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
2314
/* cntlsw: count the number of leading bits that are copies of the
 * sign bit (leading ones for negative values, leading zeroes
 * otherwise). */
uint32_t helper_cntlsw32 (uint32_t val)
{
    if (val & 0x80000000)
        return clz32(~val);
    else
        return clz32(val);
}

/* cntlzw: count leading zero bits (thin wrapper around clz32). */
uint32_t helper_cntlzw32 (uint32_t val)
{
    return clz32(val);
}
2327
/* Single-precision floating-point conversions */
/* SPE departs from default IEEE 754 behaviour for NaN inputs: every
 * float-to-integer conversion below returns 0 for a NaN operand, hence
 * the explicit float32_is_nan() guards. */

/* efscfsi: signed 32-bit integer -> float32 */
static always_inline uint32_t efscfsi (uint32_t val)
{
    CPU_FloatU u;

    u.f = int32_to_float32(val, &env->spe_status);

    return u.l;
}

/* efscfui: unsigned 32-bit integer -> float32 */
static always_inline uint32_t efscfui (uint32_t val)
{
    CPU_FloatU u;

    u.f = uint32_to_float32(val, &env->spe_status);

    return u.l;
}

/* efsctsi: float32 -> signed 32-bit integer, current rounding mode */
static always_inline int32_t efsctsi (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32(u.f, &env->spe_status);
}

/* efsctui: float32 -> unsigned 32-bit integer, current rounding mode */
static always_inline uint32_t efsctui (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32(u.f, &env->spe_status);
}

/* efsctsiz: float32 -> signed 32-bit integer, round toward zero */
static always_inline uint32_t efsctsiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_int32_round_to_zero(u.f, &env->spe_status);
}

/* efsctuiz: float32 -> unsigned 32-bit integer, round toward zero */
static always_inline uint32_t efsctuiz (uint32_t val)
{
    CPU_FloatU u;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;

    return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
}

/* efscfsf: signed 32-bit fractional -> float32 (value scaled by
 * dividing by 2^32 after the integer conversion). */
static always_inline uint32_t efscfsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = int32_to_float32(val, &env->spe_status);
    tmp = int64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}

/* efscfuf: unsigned 32-bit fractional -> float32 (value / 2^32) */
static always_inline uint32_t efscfuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.f = uint32_to_float32(val, &env->spe_status);
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_div(u.f, tmp, &env->spe_status);

    return u.l;
}

/* efsctsf: float32 -> signed 32-bit fractional (scale by 2^32, then
 * convert; NaN yields 0). */
static always_inline uint32_t efsctsf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_mul(u.f, tmp, &env->spe_status);

    return float32_to_int32(u.f, &env->spe_status);
}

/* efsctuf: float32 -> unsigned 32-bit fractional (scale by 2^32,
 * then convert; NaN yields 0). */
static always_inline uint32_t efsctuf (uint32_t val)
{
    CPU_FloatU u;
    float32 tmp;

    u.l = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float32_is_nan(u.f)))
        return 0;
    tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
    u.f = float32_mul(u.f, tmp, &env->spe_status);

    return float32_to_uint32(u.f, &env->spe_status);
}
2448
/* Emit the public scalar helper for each static conversion primitive
 * above (helper_efscfsi wraps efscfsi, etc.). */
#define HELPER_SPE_SINGLE_CONV(name) \
uint32_t helper_e##name (uint32_t val) \
{ \
    return e##name(val); \
}
/* efscfsi */
HELPER_SPE_SINGLE_CONV(fscfsi);
/* efscfui */
HELPER_SPE_SINGLE_CONV(fscfui);
/* efscfuf */
HELPER_SPE_SINGLE_CONV(fscfuf);
/* efscfsf */
HELPER_SPE_SINGLE_CONV(fscfsf);
/* efsctsi */
HELPER_SPE_SINGLE_CONV(fsctsi);
/* efsctui */
HELPER_SPE_SINGLE_CONV(fsctui);
/* efsctsiz */
HELPER_SPE_SINGLE_CONV(fsctsiz);
/* efsctuiz */
HELPER_SPE_SINGLE_CONV(fsctuiz);
/* efsctsf */
HELPER_SPE_SINGLE_CONV(fsctsf);
/* efsctuf */
HELPER_SPE_SINGLE_CONV(fsctuf);
2474
/* Emit the vector (ev*) helper: apply the scalar conversion to both
 * 32-bit halves of the 64-bit GPR pair independently. */
#define HELPER_SPE_VECTOR_CONV(name) \
uint64_t helper_ev##name (uint64_t val) \
{ \
    return ((uint64_t)e##name(val >> 32) << 32) | \
            (uint64_t)e##name(val); \
}
/* evfscfsi */
HELPER_SPE_VECTOR_CONV(fscfsi);
/* evfscfui */
HELPER_SPE_VECTOR_CONV(fscfui);
/* evfscfuf */
HELPER_SPE_VECTOR_CONV(fscfuf);
/* evfscfsf */
HELPER_SPE_VECTOR_CONV(fscfsf);
/* evfsctsi */
HELPER_SPE_VECTOR_CONV(fsctsi);
/* evfsctui */
HELPER_SPE_VECTOR_CONV(fsctui);
/* evfsctsiz */
HELPER_SPE_VECTOR_CONV(fsctsiz);
/* evfsctuiz */
HELPER_SPE_VECTOR_CONV(fsctuiz);
/* evfsctsf */
HELPER_SPE_VECTOR_CONV(fsctsf);
/* evfsctuf */
HELPER_SPE_VECTOR_CONV(fsctuf);
2501
/* Single-precision floating-point arithmetic */
/* These primitives operate on the raw 32-bit GPR images; the CPU_FloatU
 * union reinterprets them as float32 without changing the bits. */

/* efsadd: op1 + op2 */
static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_add(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* efssub: op1 - op2 */
static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_sub(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* efsmul: op1 * op2 */
static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_mul(u1.f, u2.f, &env->spe_status);
    return u1.l;
}

/* efsdiv: op1 / op2 */
static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
{
    CPU_FloatU u1, u2;
    u1.l = op1;
    u2.l = op2;
    u1.f = float32_div(u1.f, u2.f, &env->spe_status);
    return u1.l;
}
2538
/* Emit the public scalar helper for each arithmetic primitive above. */
#define HELPER_SPE_SINGLE_ARITH(name) \
uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
{ \
    return e##name(op1, op2); \
}
/* efsadd */
HELPER_SPE_SINGLE_ARITH(fsadd);
/* efssub */
HELPER_SPE_SINGLE_ARITH(fssub);
/* efsmul */
HELPER_SPE_SINGLE_ARITH(fsmul);
/* efsdiv */
HELPER_SPE_SINGLE_ARITH(fsdiv);
2552
/* Emit the vector (ev*) helper: apply the scalar arithmetic primitive
 * to the high and low 32-bit halves independently. */
#define HELPER_SPE_VECTOR_ARITH(name) \
uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
{ \
    return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
            (uint64_t)e##name(op1, op2); \
}
/* evfsadd */
HELPER_SPE_VECTOR_ARITH(fsadd);
/* evfssub */
HELPER_SPE_VECTOR_ARITH(fssub);
/* evfsmul */
HELPER_SPE_VECTOR_ARITH(fsmul);
/* evfsdiv */
HELPER_SPE_VECTOR_ARITH(fsdiv);
2567
2568 /* Single-precision floating-point comparisons */
2569 static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
2570 {
2571 CPU_FloatU u1, u2;
2572 u1.l = op1;
2573 u2.l = op2;
2574 return float32_lt(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2575 }
2576
2577 static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
2578 {
2579 CPU_FloatU u1, u2;
2580 u1.l = op1;
2581 u2.l = op2;
2582 return float32_le(u1.f, u2.f, &env->spe_status) ? 0 : 4;
2583 }
2584
2585 static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
2586 {
2587 CPU_FloatU u1, u2;
2588 u1.l = op1;
2589 u2.l = op2;
2590 return float32_eq(u1.f, u2.f, &env->spe_status) ? 4 : 0;
2591 }
2592
/* The efscmp* forms are architecturally distinct from the efstst*
 * (test) forms in how they treat special values; for now they simply
 * alias the test primitives, as the TODO notes. */
static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststlt(op1, op2);
}

static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststgt(op1, op2);
}

static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return efststeq(op1, op2);
}
2610
/* Scalar compare helpers: shift the comparison result into bit
 * position 2 of the CR field (a true result should end up as 0b0100).
 * NOTE(review): this shift assumes the e##name primitives return 0/1;
 * verify the efstst* return values against the SPE PEM. */
#define HELPER_SINGLE_SPE_CMP(name) \
uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
{ \
    return e##name(op1, op2) << 2; \
}
/* efststlt */
HELPER_SINGLE_SPE_CMP(fststlt);
/* efststgt */
HELPER_SINGLE_SPE_CMP(fststgt);
/* efststeq */
HELPER_SINGLE_SPE_CMP(fststeq);
/* efscmplt */
HELPER_SINGLE_SPE_CMP(fscmplt);
/* efscmpgt */
HELPER_SINGLE_SPE_CMP(fscmpgt);
/* efscmpeq */
HELPER_SINGLE_SPE_CMP(fscmpeq);

/* Build the 4-bit CR pattern { t0, t1, t0|t1, t0&t1 } for a vector
 * compare (high-half result t0, low-half result t1).  The expression
 * only yields a valid 4-bit field when t0 and t1 are 0 or 1. */
static always_inline uint32_t evcmp_merge (int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
2633
/* Vector compare helpers: run the scalar comparison on both 32-bit
 * halves and merge the two results into one CR field. */
#define HELPER_VECTOR_SPE_CMP(name) \
uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
{ \
    return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
}
/* evfststlt */
HELPER_VECTOR_SPE_CMP(fststlt);
/* evfststgt */
HELPER_VECTOR_SPE_CMP(fststgt);
/* evfststeq */
HELPER_VECTOR_SPE_CMP(fststeq);
/* evfscmplt */
HELPER_VECTOR_SPE_CMP(fscmplt);
/* evfscmpgt */
HELPER_VECTOR_SPE_CMP(fscmpgt);
/* evfscmpeq */
HELPER_VECTOR_SPE_CMP(fscmpeq);
2651
/* Double-precision floating-point conversion */
/* efdcfsi: signed 32-bit integer -> float64 */
uint64_t helper_efdcfsi (uint32_t val)
{
    CPU_DoubleU u;

    u.d = int32_to_float64(val, &env->spe_status);

    return u.ll;
}

/* efdcfsid: signed 64-bit integer -> float64 */
uint64_t helper_efdcfsid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = int64_to_float64(val, &env->spe_status);

    return u.ll;
}

/* efdcfui: unsigned 32-bit integer -> float64 */
uint64_t helper_efdcfui (uint32_t val)
{
    CPU_DoubleU u;

    u.d = uint32_to_float64(val, &env->spe_status);

    return u.ll;
}

/* efdcfuid: unsigned 64-bit integer -> float64 */
uint64_t helper_efdcfuid (uint64_t val)
{
    CPU_DoubleU u;

    u.d = uint64_to_float64(val, &env->spe_status);

    return u.ll;
}
2688
/* float64 -> integer conversions.  As with the single-precision
 * variants, SPE returns 0 for a NaN operand instead of the IEEE 754
 * default result, hence the explicit guards. */

/* efdctsi: float64 -> signed 32-bit integer, current rounding mode */
uint32_t helper_efdctsi (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32(u.d, &env->spe_status);
}

/* efdctui: float64 -> unsigned 32-bit integer, current rounding mode */
uint32_t helper_efdctui (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32(u.d, &env->spe_status);
}

/* efdctsiz: float64 -> signed 32-bit integer, round toward zero */
uint32_t helper_efdctsiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int32_round_to_zero(u.d, &env->spe_status);
}

/* efdctsidz: float64 -> signed 64-bit integer, round toward zero */
uint64_t helper_efdctsidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_int64_round_to_zero(u.d, &env->spe_status);
}

/* efdctuiz: float64 -> unsigned 32-bit integer, round toward zero */
uint32_t helper_efdctuiz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
}

/* efdctuidz: float64 -> unsigned 64-bit integer, round toward zero */
uint64_t helper_efdctuidz (uint64_t val)
{
    CPU_DoubleU u;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;

    return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
}
2760
/* efdcfsf: signed 32-bit fractional -> float64 (value / 2^32) */
uint64_t helper_efdcfsf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = int32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}

/* efdcfuf: unsigned 32-bit fractional -> float64 (value / 2^32).
 * The 2^32 scale constant is positive and fits int64, so the signed
 * conversion used here is equivalent to the unsigned one. */
uint64_t helper_efdcfuf (uint32_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.d = uint32_to_float64(val, &env->spe_status);
    tmp = int64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_div(u.d, tmp, &env->spe_status);

    return u.ll;
}
2784
/* efdctsf: float64 -> signed 32-bit fractional (scale by 2^32 then
 * convert; NaN yields 0). */
uint32_t helper_efdctsf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_int32(u.d, &env->spe_status);
}

/* efdctuf: float64 -> unsigned 32-bit fractional (scale by 2^32 then
 * convert; NaN yields 0). */
uint32_t helper_efdctuf (uint64_t val)
{
    CPU_DoubleU u;
    float64 tmp;

    u.ll = val;
    /* NaN are not treated the same way IEEE 754 does */
    if (unlikely(float64_is_nan(u.d)))
        return 0;
    tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
    u.d = float64_mul(u.d, tmp, &env->spe_status);

    return float64_to_uint32(u.d, &env->spe_status);
}
2814
/* efscfd: narrow float64 -> float32 */
uint32_t helper_efscfd (uint64_t val)
{
    CPU_DoubleU u1;
    CPU_FloatU u2;

    u1.ll = val;
    u2.f = float64_to_float32(u1.d, &env->spe_status);

    return u2.l;
}

/* efdcfs: widen float32 -> float64 */
uint64_t helper_efdcfs (uint32_t val)
{
    CPU_DoubleU u2;
    CPU_FloatU u1;

    u1.l = val;
    u2.d = float32_to_float64(u1.f, &env->spe_status);

    return u2.ll;
}
2836
/* Double precision fixed-point arithmetic */
/* These operate on raw 64-bit GPR-pair images reinterpreted as
 * float64 via CPU_DoubleU. */

/* efdadd: op1 + op2 */
uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_add(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* efdsub: op1 - op2 */
uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_sub(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* efdmul: op1 * op2 */
uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_mul(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}

/* efddiv: op1 / op2 */
uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    u1.d = float64_div(u1.d, u2.d, &env->spe_status);
    return u1.ll;
}
2873
/* Double precision floating point helpers */
/* Unlike the single-precision path, these helpers feed the CR field
 * directly: a true comparison returns 4 (the 0b0100 pattern), false
 * returns 0. */
uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_lt(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

/* "Greater than" computed as !(op1 <= op2); unordered operands
 * therefore test as greater. */
uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_le(u1.d, u2.d, &env->spe_status) ? 0 : 4;
}

uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    return float64_eq(u1.d, u2.d, &env->spe_status) ? 4 : 0;
}

/* The efdcmp* forms currently alias the test forms; see TODOs. */
uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstlt(op1, op2);
}

uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtstgt(op1, op2);
}

uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
{
    /* XXX: TODO: test special values (NaN, infinites, ...) */
    return helper_efdtsteq(op1, op2);
}
2916
2917 /*****************************************************************************/
2918 /* Softmmu support */
2919 #if !defined (CONFIG_USER_ONLY)
2920
2921 #define MMUSUFFIX _mmu
2922
2923 #define SHIFT 0
2924 #include "softmmu_template.h"
2925
2926 #define SHIFT 1
2927 #include "softmmu_template.h"
2928
2929 #define SHIFT 2
2930 #include "softmmu_template.h"
2931
2932 #define SHIFT 3
2933 #include "softmmu_template.h"
2934
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    /* The global env may not be current when called from outside
     * generated code, so it is saved and temporarily replaced with
     * cpu_single_env, then restored on exit. */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            /* Resynchronize the CPU state from the host return address
             * inside the translated block before raising the fault. */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Does not return: longjmps back to the CPU main loop. */
        helper_raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
2966
/* Segment registers load and store */
/* mfsr: read segment register sr_num.
 * NOTE(review): sr_num is assumed to be pre-masked to 0..15 by the
 * translator -- no bounds check is done here. */
target_ulong helper_load_sr (target_ulong sr_num)
{
    return env->sr[sr_num];
}

/* mtsr: write segment register sr_num, delegating to ppc_store_sr
 * which also handles any required TLB invalidation. */
void helper_store_sr (target_ulong sr_num, target_ulong val)
{
    ppc_store_sr(env, sr_num, val);
}
2977
/* SLB management */
/* 64-bit only: thin wrappers around the SLB routines in helper.c for
 * the slbmfee/slbmte/slbia/slbie instructions. */
#if defined(TARGET_PPC64)
/* Read SLB entry slb_nr. */
target_ulong helper_load_slb (target_ulong slb_nr)
{
    return ppc_load_slb(env, slb_nr);
}

/* Write rs into SLB entry slb_nr. */
void helper_store_slb (target_ulong slb_nr, target_ulong rs)
{
    ppc_store_slb(env, slb_nr, rs);
}

/* slbia: invalidate all SLB entries. */
void helper_slbia (void)
{
    ppc_slb_invalidate_all(env);
}

/* slbie: invalidate the SLB entry matching addr. */
void helper_slbie (target_ulong addr)
{
    ppc_slb_invalidate_one(env, addr);
}

#endif /* defined(TARGET_PPC64) */
3001
/* TLB management */
/* tlbia: invalidate the whole TLB. */
void helper_tlbia (void)
{
    ppc_tlb_invalidate_all(env);
}

/* tlbie: invalidate the TLB entries matching effective address addr. */
void helper_tlbie (target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}
3012
/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
/* Common body for tlbld (data) / tlbli (code): gather the PTE compare
 * word, physical word and miss address from the machine-specific SPRs
 * filled in by the TLB-miss exception, and store the entry into the
 * way indicated by SRR1. */
static void do_6xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    /* RPA holds the second (physical) PTE word for both I and D. */
    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    /* Bit 17 of SRR1 selects the TLB way to replace. */
    way = (env->spr[SPR_SRR1] >> 17) & 1;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

/* tlbld: load a data TLB entry for EPN. */
void helper_6xx_tlbd (target_ulong EPN)
{
    do_6xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry for EPN. */
void helper_6xx_tlbi (target_ulong EPN)
{
    do_6xx_tlb(EPN, 1);
}
3050
/* PowerPC 74xx software TLB load instructions helpers */
/* Same idea as do_6xx_tlb, but the 74xx exposes the PTE words through
 * PTEHI/PTELO and packs the miss address and the way into TLBMISS
 * (low 2 bits = way).  The entry is stored via the shared
 * ppc6xx_tlb_store() backend. */
static void do_74xx_tlb (target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_PTELO];
    CMP = env->spr[SPR_PTEHI];
    EPN = env->spr[SPR_TLBMISS] & ~0x3;
    way = env->spr[SPR_TLBMISS] & 0x3;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
                " PTE1 " ADDRX " way %d\n",
                __func__, new_EPN, EPN, CMP, RPN, way);
    }
#endif
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

/* tlbld: load a data TLB entry for EPN. */
void helper_74xx_tlbd (target_ulong EPN)
{
    do_74xx_tlb(EPN, 0);
}

/* tlbli: load an instruction TLB entry for EPN. */
void helper_74xx_tlbi (target_ulong EPN)
{
    do_74xx_tlb(EPN, 1);
}
3082
3083 static always_inline target_ulong booke_tlb_to_page_size (int size)
3084 {
3085 return 1024 << (2 * size);
3086 }
3087
/* Inverse of booke_tlb_to_page_size(): map a page size in bytes back
 * to the 4-bit BookE TLB SIZE field.  Sizes above 1 GB only exist when
 * target_ulong is 64-bit.  Returns -1 when the size is not one of the
 * architected values. */
static always_inline int booke_page_size_to_tlb (target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined (TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}
3150
/* Helpers for 4xx TLB management */
/* tlbre (word 0): rebuild the TLBHI image of the entry: EPN, valid bit
 * (0x400) and SIZE field (bits 7..9).  Note the architected side
 * effect: reading the entry also loads its PID into SPR_40x_PID. */
target_ulong helper_4xx_tlbre_lo (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID)
        ret |= 0x400;
    size = booke_page_size_to_tlb(tlb->size);
    /* Fall back to a 4 KB encoding for sizes the 4xx cannot express. */
    if (size < 0 || size > 0x7)
        size = 1;
    ret |= size << 7;
    env->spr[SPR_40x_PID] = tlb->PID;
    return ret;
}
3170
/* tlbre (word 1): rebuild the TLBLO image of the entry: RPN plus the
 * EX (0x200) and WR (0x100) permission bits. */
target_ulong helper_4xx_tlbre_hi (target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC)
        ret |= 0x200;
    if (tlb->prot & PAGE_WRITE)
        ret |= 0x100;
    return ret;
}
3185
/* tlbwe (word 0): write the TLBHI word of a 4xx TLB entry.
 * val layout used here: EPN in the high bits, 0x40 = valid,
 * bits 7..9 = SIZE, 0x20 = little-endian (unsupported).  Any QEMU
 * pages mapped by the old or new extent are flushed from the host
 * TLB. */
void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;
    target_ulong page, end;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    /* Invalidate previous TLB (if it's valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
    tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
    /* We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, one should use the ppcemb target instead
     * of the ppc or ppc64 one
     */
    if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    /* EPN is naturally aligned to the page size. */
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & 0x40)
        tlb->prot |= PAGE_VALID;
    else
        tlb->prot &= ~PAGE_VALID;
    if (val & 0x20) {
        /* XXX: TO BE FIXED */
        cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    tlb->attr = val & 0xFF;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
    /* Invalidate new TLB (if valid) */
    if (tlb->prot & PAGE_VALID) {
        end = tlb->EPN + tlb->size;
#if defined (DEBUG_SOFTWARE_TLB)
        if (loglevel != 0) {
            fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
                    " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
        }
#endif
        for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
            tlb_flush_page(env, page);
    }
}
3255
/* tlbwe (word 1): write the TLBLO word of a 4xx TLB entry: RPN plus
 * permissions.  Read access is always granted; 0x200 adds execute and
 * 0x100 adds write permission. */
void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
{
    ppcemb_tlb_t *tlb;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
    }
#endif
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    tlb->RPN = val & 0xFFFFFC00;
    tlb->prot = PAGE_READ;
    if (val & 0x200)
        tlb->prot |= PAGE_EXEC;
    if (val & 0x100)
        tlb->prot |= PAGE_WRITE;
#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
                " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
                (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                tlb->prot & PAGE_READ ? 'r' : '-',
                tlb->prot & PAGE_WRITE ? 'w' : '-',
                tlb->prot & PAGE_EXEC ? 'x' : '-',
                tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
    }
#endif
}
3285
/* tlbsx: search the TLB for address under the current PID; the search
 * result encoding (entry index or miss) is produced by
 * ppcemb_tlb_search(). */
target_ulong helper_4xx_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}
3290
/* PowerPC 440 TLB management */
/* tlbwe: write one of the three words of a 440 TLB entry.
 * word 0: EPN, SIZE field (bits 4..7), attribute bit 0 (bit 8) and
 *         valid bit (0x200);
 * word 1: RPN;
 * word 2: storage attributes (bits 8..15) and the user/supervisor
 *         R/W/X permission bits (user permissions are kept shifted
 *         left by 4 in tlb->prot).
 * The host TLB is flushed whenever a change could affect existing
 * mappings. */
void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
{
    ppcemb_tlb_t *tlb;
    target_ulong EPN, RPN, size;
    int do_flush_tlbs;

#if defined (DEBUG_SOFTWARE_TLB)
    if (loglevel != 0) {
        fprintf(logfile, "%s word %d entry %d value " ADDRX "\n",
                __func__, word, (int)entry, value);
    }
#endif
    do_flush_tlbs = 0;
    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        EPN = value & 0xFFFFFC00;
        /* Moving or shrinking a valid entry requires a flush. */
        if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
            do_flush_tlbs = 1;
        tlb->EPN = EPN;
        size = booke_tlb_to_page_size((value >> 4) & 0xF);
        if ((tlb->prot & PAGE_VALID) && tlb->size < size)
            do_flush_tlbs = 1;
        tlb->size = size;
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            if (tlb->prot & PAGE_VALID) {
                tlb->prot &= ~PAGE_VALID;
                do_flush_tlbs = 1;
            }
        }
        /* The entry's PID comes from the low byte of MMUCR. */
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        if (do_flush_tlbs)
            tlb_flush(env, 1);
        break;
    case 1:
        RPN = value & 0xFFFFFC0F;
        if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
            tlb_flush(env, 1);
        tlb->RPN = RPN;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        /* Keep only the valid bit, then rebuild the permissions. */
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1)
            tlb->prot |= PAGE_READ << 4;
        if (value & 0x2)
            tlb->prot |= PAGE_WRITE << 4;
        if (value & 0x4)
            tlb->prot |= PAGE_EXEC << 4;
        if (value & 0x8)
            tlb->prot |= PAGE_READ;
        if (value & 0x10)
            tlb->prot |= PAGE_WRITE;
        if (value & 0x20)
            tlb->prot |= PAGE_EXEC;
        break;
    }
}
3357
/* tlbre: read back one of the three words of a 440 TLB entry; exact
 * inverse of helper_440_tlbwe's encodings.  Reading word 0 also loads
 * the entry's PID into the low byte of MMUCR (architected side
 * effect). */
target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb[entry].tlbe;
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        /* Fall back to a 4 KB encoding for non-architected sizes. */
        if (size < 0 || size > 0xF)
            size = 1;
        ret |= size << 4;
        if (tlb->attr & 0x1)
            ret |= 0x100;
        if (tlb->prot & PAGE_VALID)
            ret |= 0x200;
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4))
            ret |= 0x1;
        if (tlb->prot & (PAGE_WRITE << 4))
            ret |= 0x2;
        if (tlb->prot & (PAGE_EXEC << 4))
            ret |= 0x4;
        if (tlb->prot & PAGE_READ)
            ret |= 0x8;
        if (tlb->prot & PAGE_WRITE)
            ret |= 0x10;
        if (tlb->prot & PAGE_EXEC)
            ret |= 0x20;
        break;
    }
    return ret;
}
3403
/* tlbsx: search the TLB for address under the PID held in the low
 * byte of MMUCR. */
target_ulong helper_440_tlbsx (target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}
3408
3409 #endif /* !CONFIG_USER_ONLY */