/*
 *  PowerPC emulation helpers for qemu.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <string.h>
#include "exec.h"
#include "host-utils.h"
#include "helper.h"

#include "helper_regs.h"

//#define DEBUG_OP
//#define DEBUG_EXCEPTIONS
//#define DEBUG_SOFTWARE_TLB

#ifdef DEBUG_SOFTWARE_TLB
#  define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SWTLB(...) do { } while (0)
#endif


/*****************************************************************************/
/* Exceptions processing helpers */

void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
{
#if 0
    printf("Raise exception %3x code : %d\n", exception, error_code);
#endif
    env->exception_index = exception;
    env->error_code = error_code;
    cpu_loop_exit();
}

void helper_raise_exception (uint32_t exception)
{
    helper_raise_exception_err(exception, 0);
}

56/*****************************************************************************/
57/* SPR accesses */
58void helper_load_dump_spr (uint32_t sprn)
a496775f 59{
93fcfe39 60 qemu_log("Read SPR %d %03x => " ADDRX "\n",
a496775f 61 sprn, sprn, env->spr[sprn]);
a496775f
JM
62}
63
45d827d2 64void helper_store_dump_spr (uint32_t sprn)
a496775f 65{
93fcfe39 66 qemu_log("Write SPR %d %03x <= " ADDRX "\n",
45d827d2 67 sprn, sprn, env->spr[sprn]);
45d827d2
AJ
68}
69
70target_ulong helper_load_tbl (void)
71{
72 return cpu_ppc_load_tbl(env);
73}
74
75target_ulong helper_load_tbu (void)
76{
77 return cpu_ppc_load_tbu(env);
78}
79
80target_ulong helper_load_atbl (void)
81{
82 return cpu_ppc_load_atbl(env);
83}
84
85target_ulong helper_load_atbu (void)
86{
87 return cpu_ppc_load_atbu(env);
88}
89
90target_ulong helper_load_601_rtcl (void)
91{
92 return cpu_ppc601_load_rtcl(env);
93}
94
95target_ulong helper_load_601_rtcu (void)
96{
97 return cpu_ppc601_load_rtcu(env);
98}
99
100#if !defined(CONFIG_USER_ONLY)
101#if defined (TARGET_PPC64)
102void helper_store_asr (target_ulong val)
103{
104 ppc_store_asr(env, val);
105}
106#endif
107
108void helper_store_sdr1 (target_ulong val)
109{
110 ppc_store_sdr1(env, val);
111}
112
113void helper_store_tbl (target_ulong val)
114{
115 cpu_ppc_store_tbl(env, val);
116}
117
118void helper_store_tbu (target_ulong val)
119{
120 cpu_ppc_store_tbu(env, val);
121}
122
123void helper_store_atbl (target_ulong val)
124{
125 cpu_ppc_store_atbl(env, val);
126}
127
128void helper_store_atbu (target_ulong val)
129{
130 cpu_ppc_store_atbu(env, val);
131}
132
133void helper_store_601_rtcl (target_ulong val)
134{
135 cpu_ppc601_store_rtcl(env, val);
136}
137
138void helper_store_601_rtcu (target_ulong val)
139{
140 cpu_ppc601_store_rtcu(env, val);
141}
142
143target_ulong helper_load_decr (void)
144{
145 return cpu_ppc_load_decr(env);
146}
147
148void helper_store_decr (target_ulong val)
149{
150 cpu_ppc_store_decr(env, val);
151}
152
153void helper_store_hid0_601 (target_ulong val)
154{
155 target_ulong hid0;
156
157 hid0 = env->spr[SPR_HID0];
158 if ((val ^ hid0) & 0x00000008) {
159 /* Change current endianness */
160 env->hflags &= ~(1 << MSR_LE);
161 env->hflags_nmsr &= ~(1 << MSR_LE);
162 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
163 env->hflags |= env->hflags_nmsr;
93fcfe39 164 qemu_log("%s: set endianness to %c => " ADDRX "\n",
45d827d2 165 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
a496775f 166 }
45d827d2 167 env->spr[SPR_HID0] = (uint32_t)val;
a496775f
JM
168}
169
45d827d2
AJ
170void helper_store_403_pbr (uint32_t num, target_ulong value)
171{
172 if (likely(env->pb[num] != value)) {
173 env->pb[num] = value;
174 /* Should be optimized */
175 tlb_flush(env, 1);
176 }
177}
178
179target_ulong helper_load_40x_pit (void)
180{
181 return load_40x_pit(env);
182}
183
184void helper_store_40x_pit (target_ulong val)
185{
186 store_40x_pit(env, val);
187}
188
189void helper_store_40x_dbcr0 (target_ulong val)
190{
191 store_40x_dbcr0(env, val);
192}
193
194void helper_store_40x_sler (target_ulong val)
195{
196 store_40x_sler(env, val);
197}
198
199void helper_store_booke_tcr (target_ulong val)
200{
201 store_booke_tcr(env, val);
202}
203
204void helper_store_booke_tsr (target_ulong val)
205{
206 store_booke_tsr(env, val);
207}
208
209void helper_store_ibatu (uint32_t nr, target_ulong val)
210{
211 ppc_store_ibatu(env, nr, val);
212}
213
214void helper_store_ibatl (uint32_t nr, target_ulong val)
215{
216 ppc_store_ibatl(env, nr, val);
217}
218
219void helper_store_dbatu (uint32_t nr, target_ulong val)
220{
221 ppc_store_dbatu(env, nr, val);
222}
223
224void helper_store_dbatl (uint32_t nr, target_ulong val)
225{
226 ppc_store_dbatl(env, nr, val);
227}
228
229void helper_store_601_batl (uint32_t nr, target_ulong val)
230{
231 ppc_store_ibatl_601(env, nr, val);
232}
233
234void helper_store_601_batu (uint32_t nr, target_ulong val)
235{
236 ppc_store_ibatu_601(env, nr, val);
237}
238#endif
239
240/*****************************************************************************/
241/* Memory load and stores */
242
76db3ba4 243static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
ff4a62cd
AJ
244{
245#if defined(TARGET_PPC64)
76db3ba4
AJ
246 if (!msr_sf)
247 return (uint32_t)(addr + arg);
ff4a62cd
AJ
248 else
249#endif
76db3ba4 250 return addr + arg;
ff4a62cd
AJ
251}
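/* Illustrative sketch (not part of the original helpers): addr_add() wraps
 * effective addresses to 32 bits when a 64-bit CPU runs with MSR[SF]
 * clear.  The values below are hypothetical.
 */
#if 0
static void addr_add_example (void)
{
    /* With msr_sf clear, 0xFFFFFFFC + 8 wraps to 0x00000004,
     * not 0x100000004. */
    target_ulong wrapped = addr_add(0xFFFFFFFCUL, 8);
    qemu_log("wrapped EA: " ADDRX "\n", wrapped);
}
#endif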
252
253void helper_lmw (target_ulong addr, uint32_t reg)
254{
76db3ba4 255 for (; reg < 32; reg++) {
ff4a62cd 256 if (msr_le)
76db3ba4 257 env->gpr[reg] = bswap32(ldl(addr));
ff4a62cd 258 else
76db3ba4
AJ
259 env->gpr[reg] = ldl(addr);
260 addr = addr_add(addr, 4);
ff4a62cd
AJ
261 }
262}
263
264void helper_stmw (target_ulong addr, uint32_t reg)
265{
76db3ba4 266 for (; reg < 32; reg++) {
ff4a62cd 267 if (msr_le)
76db3ba4 268 stl(addr, bswap32((uint32_t)env->gpr[reg]));
ff4a62cd 269 else
76db3ba4
AJ
270 stl(addr, (uint32_t)env->gpr[reg]);
271 addr = addr_add(addr, 4);
ff4a62cd
AJ
272 }
273}
274
dfbc799d
AJ
275void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
276{
277 int sh;
76db3ba4
AJ
278 for (; nb > 3; nb -= 4) {
279 env->gpr[reg] = ldl(addr);
dfbc799d 280 reg = (reg + 1) % 32;
76db3ba4 281 addr = addr_add(addr, 4);
dfbc799d
AJ
282 }
283 if (unlikely(nb > 0)) {
284 env->gpr[reg] = 0;
76db3ba4
AJ
285 for (sh = 24; nb > 0; nb--, sh -= 8) {
286 env->gpr[reg] |= ldub(addr) << sh;
287 addr = addr_add(addr, 1);
dfbc799d
AJ
288 }
289 }
290}
/* The PPC32 specification says we must generate an exception if
 * rA is in the range of registers to be loaded.
 * On the other hand, IBM says this is valid, but rA won't be loaded.
 * For now, I'll follow the spec...
 */
296void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
297{
298 if (likely(xer_bc != 0)) {
299 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
300 (reg < rb && (reg + xer_bc) > rb))) {
e06fcd75
AJ
301 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
302 POWERPC_EXCP_INVAL |
303 POWERPC_EXCP_INVAL_LSWX);
dfbc799d
AJ
304 } else {
305 helper_lsw(addr, xer_bc, reg);
306 }
307 }
308}
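/* Worked example (added for clarity, hypothetical values): with ra = 0,
 * reg = 5, xer_bc = 4 and rb = 7, the registers to be loaded are r5..r8,
 * so the range covers rb (5 < 7 and 5 + 4 > 7) and helper_lswx raises the
 * program exception required by the PPC32 specification quoted above.
 */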
309
310void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
311{
312 int sh;
76db3ba4
AJ
313 for (; nb > 3; nb -= 4) {
314 stl(addr, env->gpr[reg]);
dfbc799d 315 reg = (reg + 1) % 32;
76db3ba4 316 addr = addr_add(addr, 4);
dfbc799d
AJ
317 }
318 if (unlikely(nb > 0)) {
a16b45e7 319 for (sh = 24; nb > 0; nb--, sh -= 8) {
76db3ba4 320 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
a16b45e7
AJ
321 addr = addr_add(addr, 1);
322 }
dfbc799d
AJ
323 }
324}
325
326static void do_dcbz(target_ulong addr, int dcache_line_size)
327{
76db3ba4 328 addr &= ~(dcache_line_size - 1);
799a8c8d 329 int i;
799a8c8d 330 for (i = 0 ; i < dcache_line_size ; i += 4) {
dcc532c8 331 stl(addr + i , 0);
799a8c8d 332 }
76db3ba4 333 if (env->reserve == addr)
799a8c8d
AJ
334 env->reserve = (target_ulong)-1ULL;
335}
336
337void helper_dcbz(target_ulong addr)
338{
339 do_dcbz(addr, env->dcache_line_size);
340}
341
342void helper_dcbz_970(target_ulong addr)
343{
344 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
345 do_dcbz(addr, 32);
346 else
347 do_dcbz(addr, env->dcache_line_size);
348}
349
350void helper_icbi(target_ulong addr)
351{
352 uint32_t tmp;
353
76db3ba4 354 addr &= ~(env->dcache_line_size - 1);
    /* Invalidate one cache line:
     * the PowerPC specification says this is to be treated like a load
     * (not a fetch) by the MMU. To make sure it is, do the load "by hand".
     */
dcc532c8 360 tmp = ldl(addr);
37d269df
AJ
361 tb_invalidate_page_range(addr, addr + env->icache_line_size);
362}
363
364// XXX: to be tested
365target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
366{
367 int i, c, d;
bdb4b689
AJ
368 d = 24;
369 for (i = 0; i < xer_bc; i++) {
76db3ba4
AJ
370 c = ldub(addr);
371 addr = addr_add(addr, 1);
bdb4b689
AJ
372 /* ra (if not 0) and rb are never modified */
373 if (likely(reg != rb && (ra == 0 || reg != ra))) {
374 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
375 }
376 if (unlikely(c == xer_cmp))
377 break;
378 if (likely(d != 0)) {
379 d -= 8;
380 } else {
381 d = 24;
382 reg++;
383 reg = reg & 0x1F;
384 }
385 }
386 return i;
387}
388
9a64fbe4 389/*****************************************************************************/
fdabc366 390/* Fixed point operations helpers */
d9bce9d9 391#if defined(TARGET_PPC64)
d9bce9d9 392
74637406
AJ
393/* multiply high word */
394uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
fdabc366 395{
74637406 396 uint64_t tl, th;
fdabc366 397
74637406
AJ
398 muls64(&tl, &th, arg1, arg2);
399 return th;
d9bce9d9 400}
d9bce9d9 401
74637406
AJ
402/* multiply high word unsigned */
403uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
fdabc366 404{
74637406 405 uint64_t tl, th;
fdabc366 406
74637406
AJ
407 mulu64(&tl, &th, arg1, arg2);
408 return th;
fdabc366
FB
409}
410
74637406 411uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
fdabc366 412{
d9bce9d9
JM
413 int64_t th;
414 uint64_t tl;
415
74637406 416 muls64(&tl, (uint64_t *)&th, arg1, arg2);
88ad920b 417 /* If th != 0 && th != -1, then we had an overflow */
6f2d8978 418 if (likely((uint64_t)(th + 1) <= 1)) {
3d7b417e 419 env->xer &= ~(1 << XER_OV);
fdabc366 420 } else {
3d7b417e 421 env->xer |= (1 << XER_OV) | (1 << XER_SO);
fdabc366 422 }
74637406 423 return (int64_t)tl;
d9bce9d9
JM
424}
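/* Worked example (added for clarity): the check above folds the two
 * "no overflow" cases th == 0 and th == -1 into a single unsigned compare,
 * because (uint64_t)(th + 1) <= 1 holds exactly for those two values.
 * For arg1 = arg2 = 0x100000000ULL the 128-bit product is 2^64, so th == 1
 * and XER[OV] and XER[SO] are set.
 */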
425#endif
426
26d67362 427target_ulong helper_cntlzw (target_ulong t)
603fccce 428{
26d67362 429 return clz32(t);
603fccce
JM
430}
431
432#if defined(TARGET_PPC64)
26d67362 433target_ulong helper_cntlzd (target_ulong t)
603fccce 434{
26d67362 435 return clz64(t);
603fccce
JM
436}
437#endif
438
9a64fbe4 439/* shift right arithmetic helper */
26d67362 440target_ulong helper_sraw (target_ulong value, target_ulong shift)
9a64fbe4
FB
441{
442 int32_t ret;
443
26d67362
AJ
444 if (likely(!(shift & 0x20))) {
445 if (likely((uint32_t)shift != 0)) {
446 shift &= 0x1f;
447 ret = (int32_t)value >> shift;
448 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
3d7b417e 449 env->xer &= ~(1 << XER_CA);
fdabc366 450 } else {
3d7b417e 451 env->xer |= (1 << XER_CA);
fdabc366
FB
452 }
453 } else {
26d67362 454 ret = (int32_t)value;
3d7b417e 455 env->xer &= ~(1 << XER_CA);
fdabc366
FB
456 }
457 } else {
26d67362
AJ
458 ret = (int32_t)value >> 31;
459 if (ret) {
3d7b417e 460 env->xer |= (1 << XER_CA);
26d67362
AJ
461 } else {
462 env->xer &= ~(1 << XER_CA);
76a66253 463 }
fdabc366 464 }
26d67362 465 return (target_long)ret;
9a64fbe4
FB
466}
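/* Worked example (added for clarity): for value = 0xFFFFFFF5 (-11) and
 * shift = 2, ret is -3 and the discarded bits (value & 3) = 1 are non-zero,
 * so XER[CA] is set: the arithmetic shift rounded a negative value toward
 * minus infinity.  For non-negative values the carry is always cleared.
 */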
467
d9bce9d9 468#if defined(TARGET_PPC64)
26d67362 469target_ulong helper_srad (target_ulong value, target_ulong shift)
d9bce9d9
JM
470{
471 int64_t ret;
472
26d67362
AJ
473 if (likely(!(shift & 0x40))) {
474 if (likely((uint64_t)shift != 0)) {
475 shift &= 0x3f;
476 ret = (int64_t)value >> shift;
477 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
3d7b417e 478 env->xer &= ~(1 << XER_CA);
d9bce9d9 479 } else {
3d7b417e 480 env->xer |= (1 << XER_CA);
d9bce9d9
JM
481 }
482 } else {
26d67362 483 ret = (int64_t)value;
3d7b417e 484 env->xer &= ~(1 << XER_CA);
d9bce9d9
JM
485 }
486 } else {
26d67362
AJ
487 ret = (int64_t)value >> 63;
488 if (ret) {
3d7b417e 489 env->xer |= (1 << XER_CA);
26d67362
AJ
490 } else {
491 env->xer &= ~(1 << XER_CA);
d9bce9d9
JM
492 }
493 }
26d67362 494 return ret;
d9bce9d9
JM
495}
496#endif
497
26d67362 498target_ulong helper_popcntb (target_ulong val)
d9bce9d9 499{
6176a26d
AJ
500 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
501 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
502 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
503 return val;
d9bce9d9
JM
504}
505
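/* Worked example (added for clarity): helper_popcntb keeps the byte-wise
 * partial sums in place, so each byte of the result holds the population
 * count of the corresponding input byte.  For val = 0x000000FF the three
 * steps yield 0x000000AA, 0x00000044 and finally 0x00000008: eight bits
 * set in the low byte.
 */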
506#if defined(TARGET_PPC64)
26d67362 507target_ulong helper_popcntb_64 (target_ulong val)
d9bce9d9 508{
6176a26d
AJ
509 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
510 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
511 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
512 return val;
d9bce9d9
JM
513}
514#endif
515
fdabc366 516/*****************************************************************************/
9a64fbe4 517/* Floating point operations helpers */
a0d7d5a7
AJ
518uint64_t helper_float32_to_float64(uint32_t arg)
519{
520 CPU_FloatU f;
521 CPU_DoubleU d;
522 f.l = arg;
523 d.d = float32_to_float64(f.f, &env->fp_status);
524 return d.ll;
525}
526
527uint32_t helper_float64_to_float32(uint64_t arg)
528{
529 CPU_FloatU f;
530 CPU_DoubleU d;
531 d.ll = arg;
532 f.f = float64_to_float32(d.d, &env->fp_status);
533 return f.l;
534}
535
0ca9d380 536static always_inline int isden (float64 d)
7c58044c 537{
0ca9d380 538 CPU_DoubleU u;
7c58044c 539
0ca9d380 540 u.d = d;
7c58044c 541
0ca9d380 542 return ((u.ll >> 52) & 0x7FF) == 0;
7c58044c
JM
543}
544
af12906f 545uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
7c58044c 546{
af12906f 547 CPU_DoubleU farg;
7c58044c 548 int isneg;
af12906f
AJ
549 int ret;
550 farg.ll = arg;
f23c346e 551 isneg = float64_is_neg(farg.d);
af12906f
AJ
552 if (unlikely(float64_is_nan(farg.d))) {
553 if (float64_is_signaling_nan(farg.d)) {
7c58044c 554 /* Signaling NaN: flags are undefined */
af12906f 555 ret = 0x00;
7c58044c
JM
556 } else {
557 /* Quiet NaN */
af12906f 558 ret = 0x11;
7c58044c 559 }
f23c346e 560 } else if (unlikely(float64_is_infinity(farg.d))) {
7c58044c
JM
561 /* +/- infinity */
562 if (isneg)
af12906f 563 ret = 0x09;
7c58044c 564 else
af12906f 565 ret = 0x05;
7c58044c 566 } else {
f23c346e 567 if (float64_is_zero(farg.d)) {
7c58044c
JM
568 /* +/- zero */
569 if (isneg)
af12906f 570 ret = 0x12;
7c58044c 571 else
af12906f 572 ret = 0x02;
7c58044c 573 } else {
af12906f 574 if (isden(farg.d)) {
7c58044c 575 /* Denormalized numbers */
af12906f 576 ret = 0x10;
7c58044c
JM
577 } else {
578 /* Normalized numbers */
af12906f 579 ret = 0x00;
7c58044c
JM
580 }
581 if (isneg) {
af12906f 582 ret |= 0x08;
7c58044c 583 } else {
af12906f 584 ret |= 0x04;
7c58044c
JM
585 }
586 }
587 }
588 if (set_fprf) {
589 /* We update FPSCR_FPRF */
590 env->fpscr &= ~(0x1F << FPSCR_FPRF);
af12906f 591 env->fpscr |= ret << FPSCR_FPRF;
7c58044c
JM
592 }
593 /* We just need fpcc to update Rc1 */
af12906f 594 return ret & 0xF;
7c58044c
JM
595}
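/* Reference (added note): the FPRF encodings produced above are
 *   0x11 quiet NaN,   0x09 -infinity,  0x05 +infinity,
 *   0x12 -zero,       0x02 +zero,
 *   0x18 -denormal,   0x14 +denormal,
 *   0x08 -normal,     0x04 +normal,
 * while a signaling NaN leaves the flags undefined (0x00 here).
 * Only the low four bits (FPCC) are returned for the Rc1 update.
 */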
596
597/* Floating-point invalid operations exception */
af12906f 598static always_inline uint64_t fload_invalid_op_excp (int op)
7c58044c 599{
af12906f 600 uint64_t ret = 0;
7c58044c
JM
601 int ve;
602
603 ve = fpscr_ve;
e0147e41
AJ
604 switch (op) {
605 case POWERPC_EXCP_FP_VXSNAN:
7c58044c 606 env->fpscr |= 1 << FPSCR_VXSNAN;
e0147e41
AJ
607 break;
608 case POWERPC_EXCP_FP_VXSOFT:
7c58044c 609 env->fpscr |= 1 << FPSCR_VXSOFT;
e0147e41 610 break;
7c58044c
JM
611 case POWERPC_EXCP_FP_VXISI:
612 /* Magnitude subtraction of infinities */
613 env->fpscr |= 1 << FPSCR_VXISI;
614 goto update_arith;
615 case POWERPC_EXCP_FP_VXIDI:
616 /* Division of infinity by infinity */
617 env->fpscr |= 1 << FPSCR_VXIDI;
618 goto update_arith;
619 case POWERPC_EXCP_FP_VXZDZ:
620 /* Division of zero by zero */
621 env->fpscr |= 1 << FPSCR_VXZDZ;
622 goto update_arith;
623 case POWERPC_EXCP_FP_VXIMZ:
624 /* Multiplication of zero by infinity */
625 env->fpscr |= 1 << FPSCR_VXIMZ;
626 goto update_arith;
627 case POWERPC_EXCP_FP_VXVC:
628 /* Ordered comparison of NaN */
629 env->fpscr |= 1 << FPSCR_VXVC;
630 env->fpscr &= ~(0xF << FPSCR_FPCC);
631 env->fpscr |= 0x11 << FPSCR_FPCC;
632 /* We must update the target FPR before raising the exception */
633 if (ve != 0) {
634 env->exception_index = POWERPC_EXCP_PROGRAM;
635 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
636 /* Update the floating-point enabled exception summary */
637 env->fpscr |= 1 << FPSCR_FEX;
            /* Exception is deferred */
639 ve = 0;
640 }
641 break;
642 case POWERPC_EXCP_FP_VXSQRT:
643 /* Square root of a negative number */
644 env->fpscr |= 1 << FPSCR_VXSQRT;
645 update_arith:
646 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
647 if (ve == 0) {
648 /* Set the result to quiet NaN */
e0147e41 649 ret = 0xFFF8000000000000ULL;
7c58044c
JM
650 env->fpscr &= ~(0xF << FPSCR_FPCC);
651 env->fpscr |= 0x11 << FPSCR_FPCC;
652 }
653 break;
654 case POWERPC_EXCP_FP_VXCVI:
655 /* Invalid conversion */
656 env->fpscr |= 1 << FPSCR_VXCVI;
657 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
658 if (ve == 0) {
659 /* Set the result to quiet NaN */
e0147e41 660 ret = 0xFFF8000000000000ULL;
7c58044c
JM
661 env->fpscr &= ~(0xF << FPSCR_FPCC);
662 env->fpscr |= 0x11 << FPSCR_FPCC;
663 }
664 break;
665 }
666 /* Update the floating-point invalid operation summary */
667 env->fpscr |= 1 << FPSCR_VX;
668 /* Update the floating-point exception summary */
669 env->fpscr |= 1 << FPSCR_FX;
670 if (ve != 0) {
671 /* Update the floating-point enabled exception summary */
672 env->fpscr |= 1 << FPSCR_FEX;
673 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 674 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
7c58044c 675 }
af12906f 676 return ret;
7c58044c
JM
677}
678
e33e94f9 679static always_inline void float_zero_divide_excp (void)
7c58044c 680{
7c58044c
JM
681 env->fpscr |= 1 << FPSCR_ZX;
682 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
683 /* Update the floating-point exception summary */
684 env->fpscr |= 1 << FPSCR_FX;
685 if (fpscr_ze != 0) {
686 /* Update the floating-point enabled exception summary */
687 env->fpscr |= 1 << FPSCR_FEX;
688 if (msr_fe0 != 0 || msr_fe1 != 0) {
e06fcd75
AJ
689 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
690 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
7c58044c 691 }
7c58044c
JM
692 }
693}
694
695static always_inline void float_overflow_excp (void)
696{
697 env->fpscr |= 1 << FPSCR_OX;
698 /* Update the floating-point exception summary */
699 env->fpscr |= 1 << FPSCR_FX;
700 if (fpscr_oe != 0) {
701 /* XXX: should adjust the result */
702 /* Update the floating-point enabled exception summary */
703 env->fpscr |= 1 << FPSCR_FEX;
704 /* We must update the target FPR before raising the exception */
705 env->exception_index = POWERPC_EXCP_PROGRAM;
706 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
707 } else {
708 env->fpscr |= 1 << FPSCR_XX;
709 env->fpscr |= 1 << FPSCR_FI;
710 }
711}
712
713static always_inline void float_underflow_excp (void)
714{
715 env->fpscr |= 1 << FPSCR_UX;
716 /* Update the floating-point exception summary */
717 env->fpscr |= 1 << FPSCR_FX;
718 if (fpscr_ue != 0) {
719 /* XXX: should adjust the result */
720 /* Update the floating-point enabled exception summary */
721 env->fpscr |= 1 << FPSCR_FEX;
722 /* We must update the target FPR before raising the exception */
723 env->exception_index = POWERPC_EXCP_PROGRAM;
724 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
725 }
726}
727
728static always_inline void float_inexact_excp (void)
729{
730 env->fpscr |= 1 << FPSCR_XX;
731 /* Update the floating-point exception summary */
732 env->fpscr |= 1 << FPSCR_FX;
733 if (fpscr_xe != 0) {
734 /* Update the floating-point enabled exception summary */
735 env->fpscr |= 1 << FPSCR_FEX;
736 /* We must update the target FPR before raising the exception */
737 env->exception_index = POWERPC_EXCP_PROGRAM;
738 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
739 }
740}
741
742static always_inline void fpscr_set_rounding_mode (void)
743{
744 int rnd_type;
745
746 /* Set rounding mode */
747 switch (fpscr_rn) {
748 case 0:
749 /* Best approximation (round to nearest) */
750 rnd_type = float_round_nearest_even;
751 break;
752 case 1:
753 /* Smaller magnitude (round toward zero) */
754 rnd_type = float_round_to_zero;
755 break;
756 case 2:
        /* Round toward +infinity */
758 rnd_type = float_round_up;
759 break;
760 default:
761 case 3:
        /* Round toward -infinity */
763 rnd_type = float_round_down;
764 break;
765 }
766 set_float_rounding_mode(rnd_type, &env->fp_status);
767}
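/* Added note: the FPSCR[RN] values map to softfloat rounding modes as
 * 0 -> float_round_nearest_even, 1 -> float_round_to_zero,
 * 2 -> float_round_up and 3 -> float_round_down (also the default case).
 */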
768
6e35d524
AJ
769void helper_fpscr_clrbit (uint32_t bit)
770{
771 int prev;
772
773 prev = (env->fpscr >> bit) & 1;
774 env->fpscr &= ~(1 << bit);
775 if (prev == 1) {
776 switch (bit) {
777 case FPSCR_RN1:
778 case FPSCR_RN:
779 fpscr_set_rounding_mode();
780 break;
781 default:
782 break;
783 }
784 }
785}
786
af12906f 787void helper_fpscr_setbit (uint32_t bit)
7c58044c
JM
788{
789 int prev;
790
791 prev = (env->fpscr >> bit) & 1;
792 env->fpscr |= 1 << bit;
793 if (prev == 0) {
794 switch (bit) {
795 case FPSCR_VX:
796 env->fpscr |= 1 << FPSCR_FX;
797 if (fpscr_ve)
798 goto raise_ve;
799 case FPSCR_OX:
800 env->fpscr |= 1 << FPSCR_FX;
801 if (fpscr_oe)
802 goto raise_oe;
803 break;
804 case FPSCR_UX:
805 env->fpscr |= 1 << FPSCR_FX;
806 if (fpscr_ue)
807 goto raise_ue;
808 break;
809 case FPSCR_ZX:
810 env->fpscr |= 1 << FPSCR_FX;
811 if (fpscr_ze)
812 goto raise_ze;
813 break;
814 case FPSCR_XX:
815 env->fpscr |= 1 << FPSCR_FX;
816 if (fpscr_xe)
817 goto raise_xe;
818 break;
819 case FPSCR_VXSNAN:
820 case FPSCR_VXISI:
821 case FPSCR_VXIDI:
822 case FPSCR_VXZDZ:
823 case FPSCR_VXIMZ:
824 case FPSCR_VXVC:
825 case FPSCR_VXSOFT:
826 case FPSCR_VXSQRT:
827 case FPSCR_VXCVI:
828 env->fpscr |= 1 << FPSCR_VX;
829 env->fpscr |= 1 << FPSCR_FX;
830 if (fpscr_ve != 0)
831 goto raise_ve;
832 break;
833 case FPSCR_VE:
834 if (fpscr_vx != 0) {
835 raise_ve:
836 env->error_code = POWERPC_EXCP_FP;
837 if (fpscr_vxsnan)
838 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
839 if (fpscr_vxisi)
840 env->error_code |= POWERPC_EXCP_FP_VXISI;
841 if (fpscr_vxidi)
842 env->error_code |= POWERPC_EXCP_FP_VXIDI;
843 if (fpscr_vxzdz)
844 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
845 if (fpscr_vximz)
846 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
847 if (fpscr_vxvc)
848 env->error_code |= POWERPC_EXCP_FP_VXVC;
849 if (fpscr_vxsoft)
850 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
851 if (fpscr_vxsqrt)
852 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
853 if (fpscr_vxcvi)
854 env->error_code |= POWERPC_EXCP_FP_VXCVI;
855 goto raise_excp;
856 }
857 break;
858 case FPSCR_OE:
859 if (fpscr_ox != 0) {
860 raise_oe:
861 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
862 goto raise_excp;
863 }
864 break;
865 case FPSCR_UE:
866 if (fpscr_ux != 0) {
867 raise_ue:
868 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
869 goto raise_excp;
870 }
871 break;
872 case FPSCR_ZE:
873 if (fpscr_zx != 0) {
874 raise_ze:
875 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
876 goto raise_excp;
877 }
878 break;
879 case FPSCR_XE:
880 if (fpscr_xx != 0) {
881 raise_xe:
882 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
883 goto raise_excp;
884 }
885 break;
886 case FPSCR_RN1:
887 case FPSCR_RN:
888 fpscr_set_rounding_mode();
889 break;
890 default:
891 break;
892 raise_excp:
893 /* Update the floating-point enabled exception summary */
894 env->fpscr |= 1 << FPSCR_FEX;
895 /* We have to update Rc1 before raising the exception */
896 env->exception_index = POWERPC_EXCP_PROGRAM;
897 break;
898 }
899 }
900}
901
af12906f 902void helper_store_fpscr (uint64_t arg, uint32_t mask)
7c58044c
JM
903{
904 /*
905 * We use only the 32 LSB of the incoming fpr
906 */
7c58044c
JM
907 uint32_t prev, new;
908 int i;
909
7c58044c 910 prev = env->fpscr;
af12906f 911 new = (uint32_t)arg;
27ee5df0
AJ
912 new &= ~0x60000000;
913 new |= prev & 0x60000000;
914 for (i = 0; i < 8; i++) {
7c58044c
JM
915 if (mask & (1 << i)) {
916 env->fpscr &= ~(0xF << (4 * i));
917 env->fpscr |= new & (0xF << (4 * i));
918 }
919 }
920 /* Update VX and FEX */
921 if (fpscr_ix != 0)
922 env->fpscr |= 1 << FPSCR_VX;
5567025f
AJ
923 else
924 env->fpscr &= ~(1 << FPSCR_VX);
7c58044c
JM
925 if ((fpscr_ex & fpscr_eex) != 0) {
926 env->fpscr |= 1 << FPSCR_FEX;
927 env->exception_index = POWERPC_EXCP_PROGRAM;
928 /* XXX: we should compute it properly */
929 env->error_code = POWERPC_EXCP_FP;
930 }
5567025f
AJ
931 else
932 env->fpscr &= ~(1 << FPSCR_FEX);
7c58044c
JM
933 fpscr_set_rounding_mode();
934}
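/* Added note (not part of the original file): the mask argument selects
 * 4-bit nibbles of the FPSCR, bit i of the mask covering FPSCR bits
 * 4*i..4*i+3, so a hypothetical mask of 0x01 only updates bits 0..3, while
 * the FEX and VX bits (0x60000000) are always carried over from the
 * previous value, as arranged before the loop above.
 */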
7c58044c 935
af12906f 936void helper_float_check_status (void)
7c58044c 937{
af12906f 938#ifdef CONFIG_SOFTFLOAT
7c58044c
JM
939 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
940 (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
942 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 943 helper_raise_exception_err(env->exception_index, env->error_code);
be94c952
AJ
944 } else {
945 int status = get_float_exception_flags(&env->fp_status);
e33e94f9
AJ
946 if (status & float_flag_divbyzero) {
947 float_zero_divide_excp();
948 } else if (status & float_flag_overflow) {
be94c952
AJ
949 float_overflow_excp();
950 } else if (status & float_flag_underflow) {
951 float_underflow_excp();
952 } else if (status & float_flag_inexact) {
953 float_inexact_excp();
954 }
7c58044c 955 }
af12906f
AJ
956#else
957 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
958 (env->error_code & POWERPC_EXCP_FP)) {
        /* Deferred floating-point exception after target FPR update */
960 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 961 helper_raise_exception_err(env->exception_index, env->error_code);
af12906f 962 }
af12906f
AJ
963#endif
964}
965
966#ifdef CONFIG_SOFTFLOAT
967void helper_reset_fpstatus (void)
968{
be94c952 969 set_float_exception_flags(0, &env->fp_status);
7c58044c
JM
970}
971#endif
972
af12906f
AJ
973/* fadd - fadd. */
974uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
7c58044c 975{
af12906f
AJ
976 CPU_DoubleU farg1, farg2;
977
978 farg1.ll = arg1;
979 farg2.ll = arg2;
980#if USE_PRECISE_EMULATION
981 if (unlikely(float64_is_signaling_nan(farg1.d) ||
982 float64_is_signaling_nan(farg2.d))) {
7c58044c 983 /* sNaN addition */
af12906f 984 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
17218d1f
AJ
985 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
986 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
7c58044c 987 /* Magnitude subtraction of infinities */
cf1cf21e 988 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
17218d1f
AJ
989 } else {
990 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
7c58044c 991 }
af12906f
AJ
992#else
993 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
994#endif
995 return farg1.ll;
7c58044c
JM
996}
997
af12906f
AJ
998/* fsub - fsub. */
999uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
1000{
1001 CPU_DoubleU farg1, farg2;
1002
1003 farg1.ll = arg1;
1004 farg2.ll = arg2;
1005#if USE_PRECISE_EMULATION
7c58044c 1006{
af12906f
AJ
1007 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1008 float64_is_signaling_nan(farg2.d))) {
7c58044c 1009 /* sNaN subtraction */
af12906f 1010 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
17218d1f
AJ
1011 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1012 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
7c58044c 1013 /* Magnitude subtraction of infinities */
af12906f 1014 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
17218d1f
AJ
1015 } else {
1016 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
7c58044c
JM
1017 }
1018}
af12906f
AJ
1019#else
1020 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1021#endif
1022 return farg1.ll;
1023}
7c58044c 1024
af12906f
AJ
1025/* fmul - fmul. */
1026uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
7c58044c 1027{
af12906f
AJ
1028 CPU_DoubleU farg1, farg2;
1029
1030 farg1.ll = arg1;
1031 farg2.ll = arg2;
1032#if USE_PRECISE_EMULATION
1033 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1034 float64_is_signaling_nan(farg2.d))) {
7c58044c 1035 /* sNaN multiplication */
af12906f 1036 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e
AJ
1037 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1038 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
7c58044c 1039 /* Multiplication of zero by infinity */
af12906f 1040 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1041 } else {
af12906f 1042 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
7c58044c 1043 }
af12906f
AJ
1044#else
1045 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1046#endif
1047 return farg1.ll;
1048}
7c58044c 1049
af12906f
AJ
1050/* fdiv - fdiv. */
1051uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
7c58044c 1052{
af12906f
AJ
1053 CPU_DoubleU farg1, farg2;
1054
1055 farg1.ll = arg1;
1056 farg2.ll = arg2;
1057#if USE_PRECISE_EMULATION
1058 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1059 float64_is_signaling_nan(farg2.d))) {
7c58044c 1060 /* sNaN division */
af12906f 1061 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1062 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
7c58044c 1063 /* Division of infinity by infinity */
af12906f 1064 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
e33e94f9
AJ
1065 } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1066 /* Division of zero by zero */
1067 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
7c58044c 1068 } else {
af12906f 1069 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
7c58044c 1070 }
af12906f
AJ
1071#else
1072 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1073#endif
1074 return farg1.ll;
7c58044c 1075}
7c58044c 1076
af12906f
AJ
1077/* fabs */
1078uint64_t helper_fabs (uint64_t arg)
9a64fbe4 1079{
af12906f 1080 CPU_DoubleU farg;
9a64fbe4 1081
af12906f
AJ
1082 farg.ll = arg;
1083 farg.d = float64_abs(farg.d);
1084 return farg.ll;
1085}
1086
1087/* fnabs */
1088uint64_t helper_fnabs (uint64_t arg)
1089{
1090 CPU_DoubleU farg;
1091
1092 farg.ll = arg;
1093 farg.d = float64_abs(farg.d);
1094 farg.d = float64_chs(farg.d);
1095 return farg.ll;
1096}
1097
1098/* fneg */
1099uint64_t helper_fneg (uint64_t arg)
1100{
1101 CPU_DoubleU farg;
1102
1103 farg.ll = arg;
1104 farg.d = float64_chs(farg.d);
1105 return farg.ll;
1106}
1107
1108/* fctiw - fctiw. */
1109uint64_t helper_fctiw (uint64_t arg)
1110{
1111 CPU_DoubleU farg;
1112 farg.ll = arg;
1113
1114 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1115 /* sNaN conversion */
af12906f 1116 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1117 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
af12906f 1119 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1120 } else {
af12906f 1121 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1cdb9c3d 1122#if USE_PRECISE_EMULATION
7c58044c
JM
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
af12906f 1126 farg.ll |= 0xFFF80000ULL << 32;
e864cabd 1127#endif
7c58044c 1128 }
af12906f 1129 return farg.ll;
9a64fbe4
FB
1130}
1131
af12906f
AJ
1132/* fctiwz - fctiwz. */
1133uint64_t helper_fctiwz (uint64_t arg)
9a64fbe4 1134{
af12906f
AJ
1135 CPU_DoubleU farg;
1136 farg.ll = arg;
4ecc3190 1137
af12906f 1138 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1139 /* sNaN conversion */
af12906f 1140 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1141 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
af12906f 1143 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1144 } else {
af12906f 1145 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1cdb9c3d 1146#if USE_PRECISE_EMULATION
7c58044c
JM
        /* XXX: higher bits are not supposed to be significant.
         *      To make tests easier, return the same as a real PowerPC 750.
         */
af12906f 1150 farg.ll |= 0xFFF80000ULL << 32;
e864cabd 1151#endif
7c58044c 1152 }
af12906f 1153 return farg.ll;
9a64fbe4
FB
1154}
1155
426613db 1156#if defined(TARGET_PPC64)
af12906f
AJ
1157/* fcfid - fcfid. */
1158uint64_t helper_fcfid (uint64_t arg)
426613db 1159{
af12906f
AJ
1160 CPU_DoubleU farg;
1161 farg.d = int64_to_float64(arg, &env->fp_status);
1162 return farg.ll;
426613db
JM
1163}
1164
af12906f
AJ
1165/* fctid - fctid. */
1166uint64_t helper_fctid (uint64_t arg)
426613db 1167{
af12906f
AJ
1168 CPU_DoubleU farg;
1169 farg.ll = arg;
426613db 1170
af12906f 1171 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1172 /* sNaN conversion */
af12906f 1173 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1174 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
af12906f 1176 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1177 } else {
af12906f 1178 farg.ll = float64_to_int64(farg.d, &env->fp_status);
7c58044c 1179 }
af12906f 1180 return farg.ll;
426613db
JM
1181}
1182
af12906f
AJ
1183/* fctidz - fctidz. */
1184uint64_t helper_fctidz (uint64_t arg)
426613db 1185{
af12906f
AJ
1186 CPU_DoubleU farg;
1187 farg.ll = arg;
426613db 1188
af12906f 1189 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1190 /* sNaN conversion */
af12906f 1191 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1192 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity conversion */
af12906f 1194 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1195 } else {
af12906f 1196 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
7c58044c 1197 }
af12906f 1198 return farg.ll;
426613db
JM
1199}
1200
1201#endif
1202
af12906f 1203static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
d7e4b87e 1204{
af12906f
AJ
1205 CPU_DoubleU farg;
1206 farg.ll = arg;
1207
1208 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1209 /* sNaN round */
af12906f 1210 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1211 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
        /* qNaN / infinity round */
af12906f 1213 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c
JM
1214 } else {
1215 set_float_rounding_mode(rounding_mode, &env->fp_status);
af12906f 1216 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
7c58044c
JM
1217 /* Restore rounding mode from FPSCR */
1218 fpscr_set_rounding_mode();
1219 }
af12906f 1220 return farg.ll;
d7e4b87e
JM
1221}
1222
af12906f 1223uint64_t helper_frin (uint64_t arg)
d7e4b87e 1224{
af12906f 1225 return do_fri(arg, float_round_nearest_even);
d7e4b87e
JM
1226}
1227
af12906f 1228uint64_t helper_friz (uint64_t arg)
d7e4b87e 1229{
af12906f 1230 return do_fri(arg, float_round_to_zero);
d7e4b87e
JM
1231}
1232
af12906f 1233uint64_t helper_frip (uint64_t arg)
d7e4b87e 1234{
af12906f 1235 return do_fri(arg, float_round_up);
d7e4b87e
JM
1236}
1237
af12906f 1238uint64_t helper_frim (uint64_t arg)
d7e4b87e 1239{
af12906f 1240 return do_fri(arg, float_round_down);
d7e4b87e
JM
1241}
1242
af12906f
AJ
1243/* fmadd - fmadd. */
1244uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
e864cabd 1245{
af12906f
AJ
1246 CPU_DoubleU farg1, farg2, farg3;
1247
1248 farg1.ll = arg1;
1249 farg2.ll = arg2;
1250 farg3.ll = arg3;
1251#if USE_PRECISE_EMULATION
1252 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1253 float64_is_signaling_nan(farg2.d) ||
1254 float64_is_signaling_nan(farg3.d))) {
7c58044c 1255 /* sNaN operation */
af12906f 1256 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1257 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1258 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1259 /* Multiplication of zero by infinity */
1260 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1261 } else {
e864cabd 1262#ifdef FLOAT128
7c58044c
JM
1263 /* This is the way the PowerPC specification defines it */
1264 float128 ft0_128, ft1_128;
1265
af12906f
AJ
1266 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1267 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1268 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1269 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1270 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1271 /* Magnitude subtraction of infinities */
1272 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1273 } else {
1274 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1275 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1276 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1277 }
e864cabd 1278#else
7c58044c 1279 /* This is OK on x86 hosts */
af12906f 1280 farg1.d = (farg1.d * farg2.d) + farg3.d;
e864cabd 1281#endif
7c58044c 1282 }
af12906f
AJ
1283#else
1284 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1285 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1286#endif
1287 return farg1.ll;
e864cabd
JM
1288}
1289
af12906f
AJ
1290/* fmsub - fmsub. */
1291uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
e864cabd 1292{
af12906f
AJ
1293 CPU_DoubleU farg1, farg2, farg3;
1294
1295 farg1.ll = arg1;
1296 farg2.ll = arg2;
1297 farg3.ll = arg3;
1298#if USE_PRECISE_EMULATION
1299 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1300 float64_is_signaling_nan(farg2.d) ||
1301 float64_is_signaling_nan(farg3.d))) {
7c58044c 1302 /* sNaN operation */
af12906f 1303 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1304 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1305 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1306 /* Multiplication of zero by infinity */
1307 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1308 } else {
e864cabd 1309#ifdef FLOAT128
7c58044c
JM
1310 /* This is the way the PowerPC specification defines it */
1311 float128 ft0_128, ft1_128;
1312
af12906f
AJ
1313 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1314 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1315 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1316 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1317 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1318 /* Magnitude subtraction of infinities */
1319 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1320 } else {
1321 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1322 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1323 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1324 }
e864cabd 1325#else
7c58044c 1326 /* This is OK on x86 hosts */
af12906f 1327 farg1.d = (farg1.d * farg2.d) - farg3.d;
e864cabd 1328#endif
7c58044c 1329 }
af12906f
AJ
1330#else
1331 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1332 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1333#endif
1334 return farg1.ll;
e864cabd 1335}
e864cabd 1336
af12906f
AJ
1337/* fnmadd - fnmadd. */
1338uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
4b3686fa 1339{
af12906f
AJ
1340 CPU_DoubleU farg1, farg2, farg3;
1341
1342 farg1.ll = arg1;
1343 farg2.ll = arg2;
1344 farg3.ll = arg3;
1345
1346 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1347 float64_is_signaling_nan(farg2.d) ||
1348 float64_is_signaling_nan(farg3.d))) {
7c58044c 1349 /* sNaN operation */
af12906f 1350 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1351 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1352 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1353 /* Multiplication of zero by infinity */
1354 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1355 } else {
1cdb9c3d 1356#if USE_PRECISE_EMULATION
e864cabd 1357#ifdef FLOAT128
7c58044c
JM
1358 /* This is the way the PowerPC specification defines it */
1359 float128 ft0_128, ft1_128;
1360
af12906f
AJ
1361 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1362 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1363 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1364 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1365 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1366 /* Magnitude subtraction of infinities */
1367 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1368 } else {
1369 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1370 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1371 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1372 }
e864cabd 1373#else
7c58044c 1374 /* This is OK on x86 hosts */
af12906f 1375 farg1.d = (farg1.d * farg2.d) + farg3.d;
e864cabd
JM
1376#endif
1377#else
af12906f
AJ
1378 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1379 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
e864cabd 1380#endif
a44d2ce1 1381 if (likely(!float64_is_nan(farg1.d)))
af12906f 1382 farg1.d = float64_chs(farg1.d);
7c58044c 1383 }
af12906f 1384 return farg1.ll;
4b3686fa
FB
1385}
1386
af12906f
AJ
1387/* fnmsub - fnmsub. */
1388uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
4b3686fa 1389{
af12906f
AJ
1390 CPU_DoubleU farg1, farg2, farg3;
1391
1392 farg1.ll = arg1;
1393 farg2.ll = arg2;
1394 farg3.ll = arg3;
1395
1396 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1397 float64_is_signaling_nan(farg2.d) ||
1398 float64_is_signaling_nan(farg3.d))) {
7c58044c 1399 /* sNaN operation */
af12906f 1400 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1401 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1402 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1403 /* Multiplication of zero by infinity */
1404 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1405 } else {
1cdb9c3d 1406#if USE_PRECISE_EMULATION
e864cabd 1407#ifdef FLOAT128
7c58044c
JM
1408 /* This is the way the PowerPC specification defines it */
1409 float128 ft0_128, ft1_128;
1410
af12906f
AJ
1411 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1412 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1413 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1414 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1415 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1416 /* Magnitude subtraction of infinities */
1417 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1418 } else {
1419 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1420 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1421 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1422 }
e864cabd 1423#else
7c58044c 1424 /* This is OK on x86 hosts */
af12906f 1425 farg1.d = (farg1.d * farg2.d) - farg3.d;
e864cabd
JM
1426#endif
1427#else
af12906f
AJ
1428 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1429 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
e864cabd 1430#endif
a44d2ce1 1431 if (likely(!float64_is_nan(farg1.d)))
af12906f 1432 farg1.d = float64_chs(farg1.d);
7c58044c 1433 }
af12906f 1434 return farg1.ll;
1ef59d0a
FB
1435}
1436
af12906f
AJ
1437/* frsp - frsp. */
1438uint64_t helper_frsp (uint64_t arg)
7c58044c 1439{
af12906f 1440 CPU_DoubleU farg;
6ad193ed 1441 float32 f32;
af12906f
AJ
1442 farg.ll = arg;
1443
1444#if USE_PRECISE_EMULATION
1445 if (unlikely(float64_is_signaling_nan(farg.d))) {
        /* sNaN conversion to single precision */
af12906f 1447 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
7c58044c 1448 } else {
6ad193ed
AJ
1449 f32 = float64_to_float32(farg.d, &env->fp_status);
1450 farg.d = float32_to_float64(f32, &env->fp_status);
7c58044c 1451 }
af12906f 1452#else
6ad193ed
AJ
1453 f32 = float64_to_float32(farg.d, &env->fp_status);
1454 farg.d = float32_to_float64(f32, &env->fp_status);
af12906f
AJ
1455#endif
1456 return farg.ll;
7c58044c 1457}
7c58044c 1458
af12906f
AJ
1459/* fsqrt - fsqrt. */
1460uint64_t helper_fsqrt (uint64_t arg)
9a64fbe4 1461{
af12906f
AJ
1462 CPU_DoubleU farg;
1463 farg.ll = arg;
1464
1465 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1466 /* sNaN square root */
af12906f 1467 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1468 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
7c58044c 1469 /* Square root of a negative nonzero number */
af12906f 1470 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
7c58044c 1471 } else {
af12906f 1472 farg.d = float64_sqrt(farg.d, &env->fp_status);
7c58044c 1473 }
af12906f 1474 return farg.ll;
9a64fbe4
FB
1475}
1476
af12906f
AJ
1477/* fre - fre. */
1478uint64_t helper_fre (uint64_t arg)
d7e4b87e 1479{
c609b12e 1480 CPU_DoubleU farg;
06f7332a 1481 farg.ll = arg;
d7e4b87e 1482
af12906f 1483 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1484 /* sNaN reciprocal */
af12906f 1485 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
d7e4b87e 1486 } else {
c609b12e 1487 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
d7e4b87e 1488 }
    return farg.ll;
d7e4b87e
JM
1490}
1491
af12906f
AJ
1492/* fres - fres. */
1493uint64_t helper_fres (uint64_t arg)
9a64fbe4 1494{
06f7332a 1495 CPU_DoubleU farg;
6c01bf6c 1496 float32 f32;
06f7332a 1497 farg.ll = arg;
4ecc3190 1498
af12906f 1499 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1500 /* sNaN reciprocal */
af12906f 1501 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
4ecc3190 1502 } else {
c609b12e 1503 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
6c01bf6c
AJ
1504 f32 = float64_to_float32(farg.d, &env->fp_status);
1505 farg.d = float32_to_float64(f32, &env->fp_status);
4ecc3190 1506 }
af12906f 1507 return farg.ll;
9a64fbe4
FB
1508}
1509
af12906f
AJ
1510/* frsqrte - frsqrte. */
1511uint64_t helper_frsqrte (uint64_t arg)
9a64fbe4 1512{
c609b12e 1513 CPU_DoubleU farg;
6c01bf6c 1514 float32 f32;
06f7332a 1515 farg.ll = arg;
4ecc3190 1516
af12906f 1517 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1518 /* sNaN reciprocal square root */
af12906f 1519 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1520 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
7c58044c 1521 /* Reciprocal square root of a negative nonzero number */
af12906f 1522 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
4ecc3190 1523 } else {
6c01bf6c 1524 farg.d = float64_sqrt(farg.d, &env->fp_status);
c609b12e 1525 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
6c01bf6c
AJ
1526 f32 = float64_to_float32(farg.d, &env->fp_status);
1527 farg.d = float32_to_float64(f32, &env->fp_status);
4ecc3190 1528 }
af12906f 1529 return farg.ll;
9a64fbe4
FB
1530}
1531
af12906f
AJ
1532/* fsel - fsel. */
1533uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
9a64fbe4 1534{
6ad7365a 1535 CPU_DoubleU farg1;
af12906f
AJ
1536
1537 farg1.ll = arg1;
af12906f 1538
572c8952 1539 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
6ad7365a 1540 return arg2;
4ecc3190 1541 else
6ad7365a 1542 return arg3;
9a64fbe4
FB
1543}
1544
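/* Added note: helper_fsel returns arg2 when arg1 is greater than or equal
 * to zero (including -0.0, caught by the float64_is_zero() test) and is
 * not a NaN; any NaN in arg1 selects arg3 instead, as required for fsel.
 */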
9a819377 1545void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
9a64fbe4 1546{
af12906f 1547 CPU_DoubleU farg1, farg2;
e1571908 1548 uint32_t ret = 0;
af12906f
AJ
1549 farg1.ll = arg1;
1550 farg2.ll = arg2;
e1571908 1551
9a819377
AJ
1552 if (unlikely(float64_is_nan(farg1.d) ||
1553 float64_is_nan(farg2.d))) {
1554 ret = 0x01UL;
1555 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1556 ret = 0x08UL;
1557 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1558 ret = 0x04UL;
7c58044c 1559 } else {
9a819377 1560 ret = 0x02UL;
9a64fbe4 1561 }
9a819377 1562
7c58044c 1563 env->fpscr &= ~(0x0F << FPSCR_FPRF);
e1571908 1564 env->fpscr |= ret << FPSCR_FPRF;
9a819377
AJ
1565 env->crf[crfD] = ret;
1566 if (unlikely(ret == 0x01UL
1567 && (float64_is_signaling_nan(farg1.d) ||
1568 float64_is_signaling_nan(farg2.d)))) {
1569 /* sNaN comparison */
1570 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1571 }
9a64fbe4
FB
1572}
1573
9a819377 1574void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
9a64fbe4 1575{
af12906f 1576 CPU_DoubleU farg1, farg2;
e1571908 1577 uint32_t ret = 0;
af12906f
AJ
1578 farg1.ll = arg1;
1579 farg2.ll = arg2;
e1571908 1580
af12906f
AJ
1581 if (unlikely(float64_is_nan(farg1.d) ||
1582 float64_is_nan(farg2.d))) {
9a819377
AJ
1583 ret = 0x01UL;
1584 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1585 ret = 0x08UL;
1586 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1587 ret = 0x04UL;
1588 } else {
1589 ret = 0x02UL;
1590 }
1591
1592 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1593 env->fpscr |= ret << FPSCR_FPRF;
1594 env->crf[crfD] = ret;
1595 if (unlikely (ret == 0x01UL)) {
af12906f
AJ
1596 if (float64_is_signaling_nan(farg1.d) ||
1597 float64_is_signaling_nan(farg2.d)) {
7c58044c
JM
1598 /* sNaN comparison */
1599 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1600 POWERPC_EXCP_FP_VXVC);
1601 } else {
1602 /* qNaN comparison */
1603 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1604 }
9a64fbe4 1605 }
9a64fbe4
FB
1606}
1607
76a66253 1608#if !defined (CONFIG_USER_ONLY)
6527f6ea 1609void helper_store_msr (target_ulong val)
0411a972 1610{
6527f6ea
AJ
1611 val = hreg_store_msr(env, val, 0);
1612 if (val != 0) {
0411a972 1613 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
e06fcd75 1614 helper_raise_exception(val);
0411a972
JM
1615 }
1616}
1617
d72a19f7 1618static always_inline void do_rfi (target_ulong nip, target_ulong msr,
0411a972 1619 target_ulong msrm, int keep_msrh)
9a64fbe4 1620{
426613db 1621#if defined(TARGET_PPC64)
0411a972
JM
1622 if (msr & (1ULL << MSR_SF)) {
1623 nip = (uint64_t)nip;
1624 msr &= (uint64_t)msrm;
a42bd6cc 1625 } else {
0411a972
JM
1626 nip = (uint32_t)nip;
1627 msr = (uint32_t)(msr & msrm);
1628 if (keep_msrh)
1629 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
a42bd6cc 1630 }
426613db 1631#else
0411a972
JM
1632 nip = (uint32_t)nip;
1633 msr &= (uint32_t)msrm;
426613db 1634#endif
0411a972
JM
1635 /* XXX: beware: this is false if VLE is supported */
1636 env->nip = nip & ~((target_ulong)0x00000003);
a4f30719 1637 hreg_store_msr(env, msr, 1);
fdabc366 1638#if defined (DEBUG_OP)
0411a972 1639 cpu_dump_rfi(env->nip, env->msr);
fdabc366 1640#endif
0411a972
JM
1641 /* No need to raise an exception here,
1642 * as rfi is always the last insn of a TB
1643 */
fdabc366 1644 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
9a64fbe4 1645}
d9bce9d9 1646
d72a19f7 1647void helper_rfi (void)
0411a972 1648{
d72a19f7 1649 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
2ada0ed7 1650 ~((target_ulong)0x0), 1);
0411a972
JM
1651}
1652
d9bce9d9 1653#if defined(TARGET_PPC64)
d72a19f7 1654void helper_rfid (void)
426613db 1655{
d72a19f7 1656 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
2ada0ed7 1657 ~((target_ulong)0x0), 0);
d9bce9d9 1658}
7863667f 1659
d72a19f7 1660void helper_hrfid (void)
be147d08 1661{
d72a19f7 1662 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
2ada0ed7 1663 ~((target_ulong)0x0), 0);
be147d08
JM
1664}
1665#endif
76a66253 1666#endif
9a64fbe4 1667
cab3bee2 1668void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
9a64fbe4 1669{
cab3bee2
AJ
1670 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1671 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1672 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1673 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1674 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
e06fcd75 1675 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
a42bd6cc 1676 }
9a64fbe4
FB
1677}
1678
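/* Added note: the flags argument is the TO field of the trap instruction.
 * For example "tweq rA,rB" encodes TO = 0x04, so the trap is only taken
 * when (int32_t)arg1 == (int32_t)arg2, while "tw 31,r0,r0" (TO = 0x1F) is
 * the canonical unconditional trap.
 */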
d9bce9d9 1679#if defined(TARGET_PPC64)
cab3bee2 1680void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
d9bce9d9 1681{
cab3bee2
AJ
1682 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1683 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1684 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1685 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1686 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
e06fcd75 1687 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
d9bce9d9
JM
1688}
1689#endif
1690
fdabc366 1691/*****************************************************************************/
76a66253 1692/* PowerPC 601 specific instructions (POWER bridge) */
9a64fbe4 1693
22e0e173 1694target_ulong helper_clcs (uint32_t arg)
9a64fbe4 1695{
22e0e173 1696 switch (arg) {
76a66253
JM
1697 case 0x0CUL:
1698 /* Instruction cache line size */
22e0e173 1699 return env->icache_line_size;
76a66253
JM
1700 break;
1701 case 0x0DUL:
1702 /* Data cache line size */
22e0e173 1703 return env->dcache_line_size;
76a66253
JM
1704 break;
1705 case 0x0EUL:
1706 /* Minimum cache line size */
22e0e173
AJ
1707 return (env->icache_line_size < env->dcache_line_size) ?
1708 env->icache_line_size : env->dcache_line_size;
76a66253
JM
1709 break;
1710 case 0x0FUL:
1711 /* Maximum cache line size */
22e0e173
AJ
1712 return (env->icache_line_size > env->dcache_line_size) ?
1713 env->icache_line_size : env->dcache_line_size;
76a66253
JM
1714 break;
1715 default:
1716 /* Undefined */
22e0e173 1717 return 0;
76a66253
JM
1718 break;
1719 }
1720}
1721
22e0e173 1722target_ulong helper_div (target_ulong arg1, target_ulong arg2)
76a66253 1723{
22e0e173 1724 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
76a66253 1725
22e0e173
AJ
1726 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1727 (int32_t)arg2 == 0) {
76a66253 1728 env->spr[SPR_MQ] = 0;
22e0e173 1729 return INT32_MIN;
76a66253 1730 } else {
22e0e173
AJ
1731 env->spr[SPR_MQ] = tmp % arg2;
1732 return tmp / (int32_t)arg2;
76a66253
JM
1733 }
1734}
1735
22e0e173 1736target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
76a66253 1737{
22e0e173 1738 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
76a66253 1739
22e0e173
AJ
1740 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1741 (int32_t)arg2 == 0) {
3d7b417e 1742 env->xer |= (1 << XER_OV) | (1 << XER_SO);
76a66253 1743 env->spr[SPR_MQ] = 0;
22e0e173 1744 return INT32_MIN;
76a66253 1745 } else {
22e0e173
AJ
1746 env->spr[SPR_MQ] = tmp % arg2;
1747 tmp /= (int32_t)arg2;
1748 if ((int32_t)tmp != tmp) {
3d7b417e 1749 env->xer |= (1 << XER_OV) | (1 << XER_SO);
76a66253 1750 } else {
3d7b417e 1751 env->xer &= ~(1 << XER_OV);
76a66253 1752 }
22e0e173 1753 return tmp;
76a66253
JM
1754 }
1755}
1756
22e0e173 1757target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
76a66253 1758{
22e0e173
AJ
1759 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1760 (int32_t)arg2 == 0) {
1761 env->spr[SPR_MQ] = 0;
1762 return INT32_MIN;
76a66253 1763 } else {
22e0e173
AJ
1764 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1765 return (int32_t)arg1 / (int32_t)arg2;
76a66253 1766 }
76a66253
JM
1767}
1768
22e0e173 1769target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
76a66253 1770{
22e0e173
AJ
1771 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1772 (int32_t)arg2 == 0) {
3d7b417e 1773 env->xer |= (1 << XER_OV) | (1 << XER_SO);
22e0e173
AJ
1774 env->spr[SPR_MQ] = 0;
1775 return INT32_MIN;
76a66253 1776 } else {
3d7b417e 1777 env->xer &= ~(1 << XER_OV);
22e0e173
AJ
1778 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1779 return (int32_t)arg1 / (int32_t)arg2;
76a66253
JM
1780 }
1781}
1782
1783#if !defined (CONFIG_USER_ONLY)
22e0e173 1784target_ulong helper_rac (target_ulong addr)
76a66253 1785{
76a66253 1786 mmu_ctx_t ctx;
faadf50e 1787 int nb_BATs;
22e0e173 1788 target_ulong ret = 0;
76a66253
JM
1789
1790 /* We don't have to generate many instances of this instruction,
1791 * as rac is supervisor only.
1792 */
faadf50e
JM
1793 /* XXX: FIX THIS: Pretend we have no BAT */
1794 nb_BATs = env->nb_BATs;
1795 env->nb_BATs = 0;
22e0e173
AJ
1796 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1797 ret = ctx.raddr;
faadf50e 1798 env->nb_BATs = nb_BATs;
22e0e173 1799 return ret;
76a66253
JM
1800}
1801
d72a19f7 1802void helper_rfsvc (void)
76a66253 1803{
d72a19f7 1804 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
76a66253 1805}
76a66253
JM
1806#endif
1807
1808/*****************************************************************************/
1809/* 602 specific instructions */
1810/* mfrom is the most crazy instruction ever seen, imho ! */
1811/* Real implementation uses a ROM table. Do the same */
5e9ae189
AJ
 1812/* Extremely decomposed:
 1813 * return 256 * log10(10^(-arg/256) + 1.0) + 0.5
 1814 * (an illustrative table-generation sketch follows this function)
 1815 */
db9a16a7 1816#if !defined (CONFIG_USER_ONLY)
cf02a65c 1817target_ulong helper_602_mfrom (target_ulong arg)
76a66253 1818{
cf02a65c 1819 if (likely(arg < 602)) {
76a66253 1820#include "mfrom_table.c"
45d827d2 1821 return mfrom_ROM_table[arg];
76a66253 1822 } else {
cf02a65c 1823 return 0;
76a66253
JM
1824 }
1825}
db9a16a7 1826#endif
76a66253
JM
1827
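An illustrative, standalone sketch of how a 602-entry ROM table matching the formula above could be generated offline. The generator below, including the name gen_mfrom_entry, is an assumption for illustration only and is not part of QEMU's sources or build.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical offline generator: one entry of an mfrom-style ROM table,
 * i.e. 256 * log10(10^(-arg/256) + 1.0) + 0.5, truncated to a byte. */
static uint8_t gen_mfrom_entry(int arg)
{
    return (uint8_t)(256.0 * log10(pow(10.0, -arg / 256.0) + 1.0) + 0.5);
}

int main(void)
{
    int i;
    /* Emit 602 comma-separated entries, eight per line. */
    for (i = 0; i < 602; i++) {
        printf("%u,%c", (unsigned)gen_mfrom_entry(i), (i % 8) == 7 ? '\n' : ' ');
    }
    printf("\n");
    return 0;
}

Compiling this with -lm and redirecting the output is one plausible way a table like mfrom_table.c could be produced; the exact generator used by QEMU is not shown here.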
1828/*****************************************************************************/
1829/* Embedded PowerPC specific helpers */
76a66253 1830
a750fc0b 1831/* XXX: to be improved to check access rights when in user-mode */
06dca6a7 1832target_ulong helper_load_dcr (target_ulong dcrn)
a750fc0b 1833{
06dca6a7 1834 target_ulong val = 0;
a750fc0b
JM
1835
1836 if (unlikely(env->dcr_env == NULL)) {
93fcfe39 1837 qemu_log("No DCR environment\n");
e06fcd75
AJ
1838 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1839 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
06dca6a7 1840 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
93fcfe39 1841 qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
e06fcd75
AJ
1842 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1843 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
a750fc0b 1844 }
06dca6a7 1845 return val;
a750fc0b
JM
1846}
1847
06dca6a7 1848void helper_store_dcr (target_ulong dcrn, target_ulong val)
a750fc0b
JM
1849{
1850 if (unlikely(env->dcr_env == NULL)) {
93fcfe39 1851 qemu_log("No DCR environment\n");
e06fcd75
AJ
1852 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1853 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
06dca6a7 1854 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
93fcfe39 1855 qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
e06fcd75
AJ
1856 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1857 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
a750fc0b
JM
1858 }
1859}
1860
76a66253 1861#if !defined(CONFIG_USER_ONLY)
d72a19f7 1862void helper_40x_rfci (void)
76a66253 1863{
d72a19f7
AJ
1864 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1865 ~((target_ulong)0xFFFF0000), 0);
a42bd6cc
JM
1866}
1867
d72a19f7 1868void helper_rfci (void)
a42bd6cc 1869{
d72a19f7
AJ
 1870 do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1871 ~((target_ulong)0x3FFF0000), 0);
a42bd6cc
JM
1872}
1873
d72a19f7 1874void helper_rfdi (void)
a42bd6cc 1875{
d72a19f7
AJ
 1876 do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1877 ~((target_ulong)0x3FFF0000), 0);
a42bd6cc
JM
1878}
1879
d72a19f7 1880void helper_rfmci (void)
a42bd6cc 1881{
d72a19f7
AJ
 1882 do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1883 ~((target_ulong)0x3FFF0000), 0);
76a66253 1884}
76a66253
JM
1885#endif
1886
1887/* 440 specific */
ef0d51af 1888target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
76a66253
JM
1889{
1890 target_ulong mask;
1891 int i;
1892
1893 i = 1;
1894 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
ef0d51af
AJ
1895 if ((high & mask) == 0) {
1896 if (update_Rc) {
1897 env->crf[0] = 0x4;
1898 }
76a66253 1899 goto done;
ef0d51af 1900 }
76a66253
JM
1901 i++;
1902 }
1903 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
ef0d51af
AJ
1904 if ((low & mask) == 0) {
1905 if (update_Rc) {
1906 env->crf[0] = 0x8;
1907 }
1908 goto done;
1909 }
76a66253
JM
1910 i++;
1911 }
ef0d51af
AJ
1912 if (update_Rc) {
1913 env->crf[0] = 0x2;
1914 }
76a66253 1915 done:
ef0d51af
AJ
1916 env->xer = (env->xer & ~0x7F) | i;
1917 if (update_Rc) {
1918 env->crf[0] |= xer_so;
1919 }
1920 return i;
fdabc366
FB
1921}
1922
d6a46fe8
AJ
1923/*****************************************************************************/
1924/* Altivec extension helpers */
1925#if defined(WORDS_BIGENDIAN)
1926#define HI_IDX 0
1927#define LO_IDX 1
1928#else
1929#define HI_IDX 1
1930#define LO_IDX 0
1931#endif
1932
1933#if defined(WORDS_BIGENDIAN)
1934#define VECTOR_FOR_INORDER_I(index, element) \
1935 for (index = 0; index < ARRAY_SIZE(r->element); index++)
1936#else
1937#define VECTOR_FOR_INORDER_I(index, element) \
1938 for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1939#endif
1940
34ba2857
AJ
1941/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1942 * execute the following block. */
1943#define DO_HANDLE_NAN(result, x) \
1944 if (float32_is_nan(x) || float32_is_signaling_nan(x)) { \
1945 CPU_FloatU __f; \
1946 __f.f = x; \
1947 __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1948 result = __f.f; \
1949 } else
1950
1951#define HANDLE_NAN1(result, x) \
1952 DO_HANDLE_NAN(result, x)
1953#define HANDLE_NAN2(result, x, y) \
1954 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1955#define HANDLE_NAN3(result, x, y, z) \
1956 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1957
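The trailing `else` in DO_HANDLE_NAN is deliberate: each HANDLE_NANx invocation is meant to be followed immediately by a block that runs only when none of the operands is a NaN. A minimal sketch of the intended expansion, mirroring what the VARITHFP-generated helpers further below do (only names already present in this file are used):

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) {
            /* Reached only when neither a->f[i] nor b->f[i] is a NaN;
             * otherwise DO_HANDLE_NAN already stored the matching QNaN. */
            r->f[i] = float32_add(a->f[i], b->f[i], &env->vec_status);
        }
    }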
00d3b8f5
AJ
1958/* Saturating arithmetic helpers. */
1959#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
1960 static always_inline to_type cvt##from##to (from_type x, int *sat) \
1961 { \
1962 to_type r; \
1963 if (use_min && x < min) { \
1964 r = min; \
1965 *sat = 1; \
1966 } else if (use_max && x > max) { \
1967 r = max; \
1968 *sat = 1; \
1969 } else { \
1970 r = x; \
1971 } \
1972 return r; \
1973 }
1974SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
1975SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
1976SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
c5b76b38
BS
1977
1978/* Work around gcc problems with the macro version */
1979static always_inline uint8_t cvtuhub(uint16_t x, int *sat)
1980{
1981 uint8_t r;
1982
1983 if (x > UINT8_MAX) {
1984 r = UINT8_MAX;
1985 *sat = 1;
1986 } else {
1987 r = x;
1988 }
1989 return r;
1990}
1991//SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
00d3b8f5
AJ
1992SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
1993SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
1994SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
1995SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
1996SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
1997#undef SATCVT
1998
cbfb6ae9
AJ
1999#define LVE(name, access, swap, element) \
2000 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2001 { \
2002 size_t n_elems = ARRAY_SIZE(r->element); \
2003 int adjust = HI_IDX*(n_elems-1); \
2004 int sh = sizeof(r->element[0]) >> 1; \
2005 int index = (addr & 0xf) >> sh; \
2006 if(msr_le) { \
2007 r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2008 } else { \
2009 r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2010 } \
2011 }
2012#define I(x) (x)
2013LVE(lvebx, ldub, I, u8)
2014LVE(lvehx, lduw, bswap16, u16)
2015LVE(lvewx, ldl, bswap32, u32)
2016#undef I
2017#undef LVE
2018
bf8d8ded
AJ
2019void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2020{
2021 int i, j = (sh & 0xf);
2022
2023 VECTOR_FOR_INORDER_I (i, u8) {
2024 r->u8[i] = j++;
2025 }
2026}
2027
2028void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2029{
2030 int i, j = 0x10 - (sh & 0xf);
2031
2032 VECTOR_FOR_INORDER_I (i, u8) {
2033 r->u8[i] = j++;
2034 }
2035}
2036
cbfb6ae9
AJ
2037#define STVE(name, access, swap, element) \
2038 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2039 { \
2040 size_t n_elems = ARRAY_SIZE(r->element); \
2041 int adjust = HI_IDX*(n_elems-1); \
2042 int sh = sizeof(r->element[0]) >> 1; \
2043 int index = (addr & 0xf) >> sh; \
2044 if(msr_le) { \
2045 access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2046 } else { \
2047 access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2048 } \
2049 }
2050#define I(x) (x)
2051STVE(stvebx, stb, I, u8)
2052STVE(stvehx, stw, bswap16, u16)
2053STVE(stvewx, stl, bswap32, u32)
2054#undef I
 2055#undef STVE
2056
6e87b7c7
AJ
2057void helper_mtvscr (ppc_avr_t *r)
2058{
2059#if defined(WORDS_BIGENDIAN)
2060 env->vscr = r->u32[3];
2061#else
2062 env->vscr = r->u32[0];
2063#endif
2064 set_flush_to_zero(vscr_nj, &env->vec_status);
2065}
2066
e343da72
AJ
2067void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2068{
2069 int i;
2070 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2071 r->u32[i] = ~a->u32[i] < b->u32[i];
2072 }
2073}
2074
7872c51c
AJ
2075#define VARITH_DO(name, op, element) \
2076void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2077{ \
2078 int i; \
2079 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2080 r->element[i] = a->element[i] op b->element[i]; \
2081 } \
2082}
2083#define VARITH(suffix, element) \
2084 VARITH_DO(add##suffix, +, element) \
2085 VARITH_DO(sub##suffix, -, element)
2086VARITH(ubm, u8)
2087VARITH(uhm, u16)
2088VARITH(uwm, u32)
2089#undef VARITH_DO
2090#undef VARITH
2091
56fdd213
AJ
2092#define VARITHFP(suffix, func) \
2093 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2094 { \
2095 int i; \
2096 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2097 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2098 r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2099 } \
2100 } \
2101 }
2102VARITHFP(addfp, float32_add)
2103VARITHFP(subfp, float32_sub)
2104#undef VARITHFP
2105
5ab09f33
AJ
2106#define VARITHSAT_CASE(type, op, cvt, element) \
2107 { \
2108 type result = (type)a->element[i] op (type)b->element[i]; \
2109 r->element[i] = cvt(result, &sat); \
2110 }
2111
2112#define VARITHSAT_DO(name, op, optype, cvt, element) \
2113 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2114 { \
2115 int sat = 0; \
2116 int i; \
2117 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2118 switch (sizeof(r->element[0])) { \
2119 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2120 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2121 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2122 } \
2123 } \
2124 if (sat) { \
2125 env->vscr |= (1 << VSCR_SAT); \
2126 } \
2127 }
2128#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2129 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2130 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2131#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2132 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2133 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2134VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2135VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2136VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2137VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2138VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2139VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2140#undef VARITHSAT_CASE
2141#undef VARITHSAT_DO
2142#undef VARITHSAT_SIGNED
2143#undef VARITHSAT_UNSIGNED
2144
fab3cbe9
AJ
2145#define VAVG_DO(name, element, etype) \
2146 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2147 { \
2148 int i; \
2149 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2150 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2151 r->element[i] = x >> 1; \
2152 } \
2153 }
2154
2155#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2156 VAVG_DO(avgs##type, signed_element, signed_type) \
2157 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2158VAVG(b, s8, int16_t, u8, uint16_t)
2159VAVG(h, s16, int32_t, u16, uint32_t)
2160VAVG(w, s32, int64_t, u32, uint64_t)
2161#undef VAVG_DO
2162#undef VAVG
2163
e140632e
AJ
2164#define VCF(suffix, cvt, element) \
2165 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2166 { \
2167 int i; \
2168 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2169 float32 t = cvt(b->element[i], &env->vec_status); \
2170 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2171 } \
2172 }
2173VCF(ux, uint32_to_float32, u32)
2174VCF(sx, int32_to_float32, s32)
2175#undef VCF
2176
1add6e23
AJ
2177#define VCMP_DO(suffix, compare, element, record) \
2178 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2179 { \
2180 uint32_t ones = (uint32_t)-1; \
2181 uint32_t all = ones; \
2182 uint32_t none = 0; \
2183 int i; \
2184 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2185 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2186 switch (sizeof (a->element[0])) { \
2187 case 4: r->u32[i] = result; break; \
2188 case 2: r->u16[i] = result; break; \
2189 case 1: r->u8[i] = result; break; \
2190 } \
2191 all &= result; \
2192 none |= result; \
2193 } \
2194 if (record) { \
2195 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2196 } \
2197 }
2198#define VCMP(suffix, compare, element) \
2199 VCMP_DO(suffix, compare, element, 0) \
2200 VCMP_DO(suffix##_dot, compare, element, 1)
2201VCMP(equb, ==, u8)
2202VCMP(equh, ==, u16)
2203VCMP(equw, ==, u32)
2204VCMP(gtub, >, u8)
2205VCMP(gtuh, >, u16)
2206VCMP(gtuw, >, u32)
2207VCMP(gtsb, >, s8)
2208VCMP(gtsh, >, s16)
2209VCMP(gtsw, >, s32)
2210#undef VCMP_DO
2211#undef VCMP
2212
819ca121
AJ
2213#define VCMPFP_DO(suffix, compare, order, record) \
2214 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2215 { \
2216 uint32_t ones = (uint32_t)-1; \
2217 uint32_t all = ones; \
2218 uint32_t none = 0; \
2219 int i; \
2220 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2221 uint32_t result; \
2222 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2223 if (rel == float_relation_unordered) { \
2224 result = 0; \
2225 } else if (rel compare order) { \
2226 result = ones; \
2227 } else { \
2228 result = 0; \
2229 } \
2230 r->u32[i] = result; \
2231 all &= result; \
2232 none |= result; \
2233 } \
2234 if (record) { \
2235 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2236 } \
2237 }
2238#define VCMPFP(suffix, compare, order) \
2239 VCMPFP_DO(suffix, compare, order, 0) \
2240 VCMPFP_DO(suffix##_dot, compare, order, 1)
2241VCMPFP(eqfp, ==, float_relation_equal)
2242VCMPFP(gefp, !=, float_relation_less)
2243VCMPFP(gtfp, ==, float_relation_greater)
2244#undef VCMPFP_DO
2245#undef VCMPFP
2246
2247static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2248 ppc_avr_t *b, int record)
2249{
2250 int i;
2251 int all_in = 0;
2252 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2253 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2254 if (le_rel == float_relation_unordered) {
2255 r->u32[i] = 0xc0000000;
2256 /* ALL_IN does not need to be updated here. */
2257 } else {
2258 float32 bneg = float32_chs(b->f[i]);
2259 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2260 int le = le_rel != float_relation_greater;
2261 int ge = ge_rel != float_relation_less;
2262 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2263 all_in |= (!le | !ge);
2264 }
2265 }
2266 if (record) {
2267 env->crf[6] = (all_in == 0) << 1;
2268 }
2269}
2270
2271void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2272{
2273 vcmpbfp_internal(r, a, b, 0);
2274}
2275
2276void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2277{
2278 vcmpbfp_internal(r, a, b, 1);
2279}
2280
875b31db
AJ
2281#define VCT(suffix, satcvt, element) \
2282 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2283 { \
2284 int i; \
2285 int sat = 0; \
2286 float_status s = env->vec_status; \
2287 set_float_rounding_mode(float_round_to_zero, &s); \
2288 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2289 if (float32_is_nan(b->f[i]) || \
2290 float32_is_signaling_nan(b->f[i])) { \
2291 r->element[i] = 0; \
2292 } else { \
2293 float64 t = float32_to_float64(b->f[i], &s); \
2294 int64_t j; \
2295 t = float64_scalbn(t, uim, &s); \
2296 j = float64_to_int64(t, &s); \
2297 r->element[i] = satcvt(j, &sat); \
2298 } \
2299 } \
2300 if (sat) { \
2301 env->vscr |= (1 << VSCR_SAT); \
2302 } \
2303 }
2304VCT(uxs, cvtsduw, u32)
2305VCT(sxs, cvtsdsw, s32)
2306#undef VCT
2307
35cf7c7e
AJ
2308void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2309{
2310 int i;
2311 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2312 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2313 /* Need to do the computation in higher precision and round
2314 * once at the end. */
2315 float64 af, bf, cf, t;
2316 af = float32_to_float64(a->f[i], &env->vec_status);
2317 bf = float32_to_float64(b->f[i], &env->vec_status);
2318 cf = float32_to_float64(c->f[i], &env->vec_status);
2319 t = float64_mul(af, cf, &env->vec_status);
2320 t = float64_add(t, bf, &env->vec_status);
2321 r->f[i] = float64_to_float32(t, &env->vec_status);
2322 }
2323 }
2324}
2325
b161ae27
AJ
2326void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2327{
2328 int sat = 0;
2329 int i;
2330
2331 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2332 int32_t prod = a->s16[i] * b->s16[i];
2333 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2334 r->s16[i] = cvtswsh (t, &sat);
2335 }
2336
2337 if (sat) {
2338 env->vscr |= (1 << VSCR_SAT);
2339 }
2340}
2341
2342void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2343{
2344 int sat = 0;
2345 int i;
2346
2347 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2348 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2349 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2350 r->s16[i] = cvtswsh (t, &sat);
2351 }
2352
2353 if (sat) {
2354 env->vscr |= (1 << VSCR_SAT);
2355 }
2356}
2357
e4039339
AJ
2358#define VMINMAX_DO(name, compare, element) \
2359 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2360 { \
2361 int i; \
2362 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2363 if (a->element[i] compare b->element[i]) { \
2364 r->element[i] = b->element[i]; \
2365 } else { \
2366 r->element[i] = a->element[i]; \
2367 } \
2368 } \
2369 }
2370#define VMINMAX(suffix, element) \
2371 VMINMAX_DO(min##suffix, >, element) \
2372 VMINMAX_DO(max##suffix, <, element)
2373VMINMAX(sb, s8)
2374VMINMAX(sh, s16)
2375VMINMAX(sw, s32)
2376VMINMAX(ub, u8)
2377VMINMAX(uh, u16)
2378VMINMAX(uw, u32)
2379#undef VMINMAX_DO
2380#undef VMINMAX
2381
1536ff64
AJ
2382#define VMINMAXFP(suffix, rT, rF) \
2383 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2384 { \
2385 int i; \
2386 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2387 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2388 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2389 r->f[i] = rT->f[i]; \
2390 } else { \
2391 r->f[i] = rF->f[i]; \
2392 } \
2393 } \
2394 } \
2395 }
2396VMINMAXFP(minfp, a, b)
2397VMINMAXFP(maxfp, b, a)
2398#undef VMINMAXFP
2399
bcd2ee23
AJ
2400void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2401{
2402 int i;
2403 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2404 int32_t prod = a->s16[i] * b->s16[i];
2405 r->s16[i] = (int16_t) (prod + c->s16[i]);
2406 }
2407}
2408
3b430048
AJ
2409#define VMRG_DO(name, element, highp) \
2410 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2411 { \
2412 ppc_avr_t result; \
2413 int i; \
2414 size_t n_elems = ARRAY_SIZE(r->element); \
2415 for (i = 0; i < n_elems/2; i++) { \
2416 if (highp) { \
2417 result.element[i*2+HI_IDX] = a->element[i]; \
2418 result.element[i*2+LO_IDX] = b->element[i]; \
2419 } else { \
2420 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2421 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2422 } \
2423 } \
2424 *r = result; \
2425 }
2426#if defined(WORDS_BIGENDIAN)
2427#define MRGHI 0
b392e756 2428#define MRGLO 1
3b430048
AJ
2429#else
2430#define MRGHI 1
2431#define MRGLO 0
2432#endif
2433#define VMRG(suffix, element) \
2434 VMRG_DO(mrgl##suffix, element, MRGHI) \
2435 VMRG_DO(mrgh##suffix, element, MRGLO)
2436VMRG(b, u8)
2437VMRG(h, u16)
2438VMRG(w, u32)
2439#undef VMRG_DO
2440#undef VMRG
2441#undef MRGHI
2442#undef MRGLO
2443
b04ae981
AJ
2444void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2445{
2446 int32_t prod[16];
2447 int i;
2448
2449 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2450 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2451 }
2452
2453 VECTOR_FOR_INORDER_I(i, s32) {
2454 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2455 }
2456}
2457
eae07261
AJ
2458void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2459{
2460 int32_t prod[8];
2461 int i;
2462
2463 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2464 prod[i] = a->s16[i] * b->s16[i];
2465 }
2466
2467 VECTOR_FOR_INORDER_I(i, s32) {
2468 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2469 }
2470}
2471
2472void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2473{
2474 int32_t prod[8];
2475 int i;
2476 int sat = 0;
2477
2478 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2479 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2480 }
2481
2482 VECTOR_FOR_INORDER_I (i, s32) {
2483 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2484 r->u32[i] = cvtsdsw(t, &sat);
2485 }
2486
2487 if (sat) {
2488 env->vscr |= (1 << VSCR_SAT);
2489 }
2490}
2491
b04ae981
AJ
2492void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2493{
2494 uint16_t prod[16];
2495 int i;
2496
2497 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2498 prod[i] = a->u8[i] * b->u8[i];
2499 }
2500
2501 VECTOR_FOR_INORDER_I(i, u32) {
2502 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2503 }
2504}
2505
4d9903b6
AJ
2506void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2507{
2508 uint32_t prod[8];
2509 int i;
2510
2511 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2512 prod[i] = a->u16[i] * b->u16[i];
2513 }
2514
2515 VECTOR_FOR_INORDER_I(i, u32) {
2516 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2517 }
2518}
2519
2520void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2521{
2522 uint32_t prod[8];
2523 int i;
2524 int sat = 0;
2525
2526 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2527 prod[i] = a->u16[i] * b->u16[i];
2528 }
2529
2530 VECTOR_FOR_INORDER_I (i, s32) {
2531 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2532 r->u32[i] = cvtuduw(t, &sat);
2533 }
2534
2535 if (sat) {
2536 env->vscr |= (1 << VSCR_SAT);
2537 }
2538}
2539
2c277908
AJ
2540#define VMUL_DO(name, mul_element, prod_element, evenp) \
2541 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2542 { \
2543 int i; \
2544 VECTOR_FOR_INORDER_I(i, prod_element) { \
2545 if (evenp) { \
2546 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2547 } else { \
2548 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2549 } \
2550 } \
2551 }
2552#define VMUL(suffix, mul_element, prod_element) \
2553 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2554 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2555VMUL(sb, s8, s16)
2556VMUL(sh, s16, s32)
2557VMUL(ub, u8, u16)
2558VMUL(uh, u16, u32)
2559#undef VMUL_DO
2560#undef VMUL
2561
35cf7c7e
AJ
2562void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2563{
2564 int i;
2565 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2566 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
 2567 /* Need to do the computation in higher precision and round
2568 * once at the end. */
2569 float64 af, bf, cf, t;
2570 af = float32_to_float64(a->f[i], &env->vec_status);
2571 bf = float32_to_float64(b->f[i], &env->vec_status);
2572 cf = float32_to_float64(c->f[i], &env->vec_status);
2573 t = float64_mul(af, cf, &env->vec_status);
2574 t = float64_sub(t, bf, &env->vec_status);
2575 t = float64_chs(t);
2576 r->f[i] = float64_to_float32(t, &env->vec_status);
2577 }
2578 }
2579}
2580
d1258698
AJ
2581void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2582{
2583 ppc_avr_t result;
2584 int i;
2585 VECTOR_FOR_INORDER_I (i, u8) {
2586 int s = c->u8[i] & 0x1f;
2587#if defined(WORDS_BIGENDIAN)
2588 int index = s & 0xf;
2589#else
2590 int index = 15 - (s & 0xf);
2591#endif
2592 if (s & 0x10) {
2593 result.u8[i] = b->u8[index];
2594 } else {
2595 result.u8[i] = a->u8[index];
2596 }
2597 }
2598 *r = result;
2599}
2600
5335a145
AJ
2601#if defined(WORDS_BIGENDIAN)
2602#define PKBIG 1
2603#else
2604#define PKBIG 0
2605#endif
1dd9ffb9
AJ
2606void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2607{
2608 int i, j;
2609 ppc_avr_t result;
2610#if defined(WORDS_BIGENDIAN)
2611 const ppc_avr_t *x[2] = { a, b };
2612#else
2613 const ppc_avr_t *x[2] = { b, a };
2614#endif
2615
2616 VECTOR_FOR_INORDER_I (i, u64) {
2617 VECTOR_FOR_INORDER_I (j, u32){
2618 uint32_t e = x[i]->u32[j];
2619 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2620 ((e >> 6) & 0x3e0) |
2621 ((e >> 3) & 0x1f));
2622 }
2623 }
2624 *r = result;
2625}
2626
5335a145
AJ
2627#define VPK(suffix, from, to, cvt, dosat) \
2628 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2629 { \
2630 int i; \
2631 int sat = 0; \
2632 ppc_avr_t result; \
2633 ppc_avr_t *a0 = PKBIG ? a : b; \
2634 ppc_avr_t *a1 = PKBIG ? b : a; \
2635 VECTOR_FOR_INORDER_I (i, from) { \
2636 result.to[i] = cvt(a0->from[i], &sat); \
2637 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2638 } \
2639 *r = result; \
2640 if (dosat && sat) { \
2641 env->vscr |= (1 << VSCR_SAT); \
2642 } \
2643 }
2644#define I(x, y) (x)
2645VPK(shss, s16, s8, cvtshsb, 1)
2646VPK(shus, s16, u8, cvtshub, 1)
2647VPK(swss, s32, s16, cvtswsh, 1)
2648VPK(swus, s32, u16, cvtswuh, 1)
2649VPK(uhus, u16, u8, cvtuhub, 1)
2650VPK(uwus, u32, u16, cvtuwuh, 1)
2651VPK(uhum, u16, u8, I, 0)
2652VPK(uwum, u32, u16, I, 0)
2653#undef I
2654#undef VPK
2655#undef PKBIG
2656
bdfbac35
AJ
2657void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2658{
2659 int i;
2660 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2661 HANDLE_NAN1(r->f[i], b->f[i]) {
2662 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2663 }
2664 }
2665}
2666
f6b19645
AJ
2667#define VRFI(suffix, rounding) \
2668 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2669 { \
2670 int i; \
2671 float_status s = env->vec_status; \
2672 set_float_rounding_mode(rounding, &s); \
2673 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2674 HANDLE_NAN1(r->f[i], b->f[i]) { \
2675 r->f[i] = float32_round_to_int (b->f[i], &s); \
2676 } \
2677 } \
2678 }
2679VRFI(n, float_round_nearest_even)
2680VRFI(m, float_round_down)
2681VRFI(p, float_round_up)
2682VRFI(z, float_round_to_zero)
2683#undef VRFI
2684
5e1d0985
AJ
2685#define VROTATE(suffix, element) \
2686 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2687 { \
2688 int i; \
2689 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2690 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2691 unsigned int shift = b->element[i] & mask; \
2692 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2693 } \
2694 }
2695VROTATE(b, u8)
2696VROTATE(h, u16)
2697VROTATE(w, u32)
2698#undef VROTATE
2699
071fc3b1
AJ
2700void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2701{
2702 int i;
2703 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2704 HANDLE_NAN1(r->f[i], b->f[i]) {
2705 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2706 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2707 }
2708 }
2709}
2710
d1258698
AJ
2711void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2712{
2713 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2714 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2715}
2716
b580763f 2717void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
f586ce09
AJ
2718{
2719 int i;
2720 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2721 HANDLE_NAN1(r->f[i], b->f[i]) {
2722 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2723 }
2724 }
2725}
2726
d9430add
AJ
2727#if defined(WORDS_BIGENDIAN)
2728#define LEFT 0
2729#define RIGHT 1
2730#else
2731#define LEFT 1
2732#define RIGHT 0
2733#endif
2734/* The specification says that the results are undefined if all of the
 2735 * shift counts are not identical. We check that they are identical,
 2736 * to conform to what real hardware appears to do. */
2737#define VSHIFT(suffix, leftp) \
2738 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2739 { \
1481e16a 2740 int shift = b->u8[LO_IDX*15] & 0x7; \
d9430add
AJ
2741 int doit = 1; \
2742 int i; \
2743 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2744 doit = doit && ((b->u8[i] & 0x7) == shift); \
2745 } \
2746 if (doit) { \
2747 if (shift == 0) { \
2748 *r = *a; \
2749 } else if (leftp) { \
2750 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2751 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2752 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2753 } else { \
2754 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2755 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2756 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2757 } \
2758 } \
2759 }
2760VSHIFT(l, LEFT)
2761VSHIFT(r, RIGHT)
2762#undef VSHIFT
2763#undef LEFT
2764#undef RIGHT
2765
d79f0809
AJ
2766#define VSL(suffix, element) \
2767 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2768 { \
2769 int i; \
2770 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2771 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2772 unsigned int shift = b->element[i] & mask; \
2773 r->element[i] = a->element[i] << shift; \
2774 } \
2775 }
2776VSL(b, u8)
2777VSL(h, u16)
2778VSL(w, u32)
2779#undef VSL
2780
cd633b10
AJ
2781void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2782{
2783 int sh = shift & 0xf;
2784 int i;
2785 ppc_avr_t result;
2786
2787#if defined(WORDS_BIGENDIAN)
2788 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2789 int index = sh + i;
2790 if (index > 0xf) {
2791 result.u8[i] = b->u8[index-0x10];
2792 } else {
2793 result.u8[i] = a->u8[index];
2794 }
2795 }
2796#else
2797 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2798 int index = (16 - sh) + i;
2799 if (index > 0xf) {
2800 result.u8[i] = a->u8[index-0x10];
2801 } else {
2802 result.u8[i] = b->u8[index];
2803 }
2804 }
2805#endif
2806 *r = result;
2807}
2808
7b239bec
AJ
2809void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2810{
2811 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2812
2813#if defined (WORDS_BIGENDIAN)
2814 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2815 memset (&r->u8[16-sh], 0, sh);
2816#else
2817 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2818 memset (&r->u8[0], 0, sh);
2819#endif
2820}
2821
e4e6bee7
AJ
2822/* Experimental testing shows that hardware masks the immediate. */
2823#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2824#if defined(WORDS_BIGENDIAN)
2825#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2826#else
2827#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2828#endif
2829#define VSPLT(suffix, element) \
2830 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2831 { \
2832 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2833 int i; \
2834 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2835 r->element[i] = s; \
2836 } \
2837 }
2838VSPLT(b, u8)
2839VSPLT(h, u16)
2840VSPLT(w, u32)
2841#undef VSPLT
2842#undef SPLAT_ELEMENT
2843#undef _SPLAT_MASKED
2844
c026766b
AJ
2845#define VSPLTI(suffix, element, splat_type) \
2846 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2847 { \
2848 splat_type x = (int8_t)(splat << 3) >> 3; \
2849 int i; \
2850 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2851 r->element[i] = x; \
2852 } \
2853 }
2854VSPLTI(b, s8, int8_t)
2855VSPLTI(h, s16, int16_t)
2856VSPLTI(w, s32, int32_t)
2857#undef VSPLTI
2858
07ef34c3
AJ
2859#define VSR(suffix, element) \
2860 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2861 { \
2862 int i; \
2863 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2864 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2865 unsigned int shift = b->element[i] & mask; \
2866 r->element[i] = a->element[i] >> shift; \
2867 } \
2868 }
2869VSR(ab, s8)
2870VSR(ah, s16)
2871VSR(aw, s32)
2872VSR(b, u8)
2873VSR(h, u16)
2874VSR(w, u32)
2875#undef VSR
2876
7b239bec
AJ
2877void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2878{
2879 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2880
2881#if defined (WORDS_BIGENDIAN)
2882 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2883 memset (&r->u8[0], 0, sh);
2884#else
2885 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2886 memset (&r->u8[16-sh], 0, sh);
2887#endif
2888}
2889
e343da72
AJ
2890void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2891{
2892 int i;
2893 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2894 r->u32[i] = a->u32[i] >= b->u32[i];
2895 }
2896}
2897
8142cddd
AJ
2898void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2899{
2900 int64_t t;
2901 int i, upper;
2902 ppc_avr_t result;
2903 int sat = 0;
2904
2905#if defined(WORDS_BIGENDIAN)
2906 upper = ARRAY_SIZE(r->s32)-1;
2907#else
2908 upper = 0;
2909#endif
2910 t = (int64_t)b->s32[upper];
2911 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2912 t += a->s32[i];
2913 result.s32[i] = 0;
2914 }
2915 result.s32[upper] = cvtsdsw(t, &sat);
2916 *r = result;
2917
2918 if (sat) {
2919 env->vscr |= (1 << VSCR_SAT);
2920 }
2921}
2922
2923void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2924{
2925 int i, j, upper;
2926 ppc_avr_t result;
2927 int sat = 0;
2928
2929#if defined(WORDS_BIGENDIAN)
2930 upper = 1;
2931#else
2932 upper = 0;
2933#endif
2934 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2935 int64_t t = (int64_t)b->s32[upper+i*2];
2936 result.u64[i] = 0;
2937 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2938 t += a->s32[2*i+j];
2939 }
2940 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2941 }
2942
2943 *r = result;
2944 if (sat) {
2945 env->vscr |= (1 << VSCR_SAT);
2946 }
2947}
2948
2949void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2950{
2951 int i, j;
2952 int sat = 0;
2953
2954 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2955 int64_t t = (int64_t)b->s32[i];
2956 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2957 t += a->s8[4*i+j];
2958 }
2959 r->s32[i] = cvtsdsw(t, &sat);
2960 }
2961
2962 if (sat) {
2963 env->vscr |= (1 << VSCR_SAT);
2964 }
2965}
2966
2967void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2968{
2969 int sat = 0;
2970 int i;
2971
2972 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2973 int64_t t = (int64_t)b->s32[i];
2974 t += a->s16[2*i] + a->s16[2*i+1];
2975 r->s32[i] = cvtsdsw(t, &sat);
2976 }
2977
2978 if (sat) {
2979 env->vscr |= (1 << VSCR_SAT);
2980 }
2981}
2982
2983void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2984{
2985 int i, j;
2986 int sat = 0;
2987
2988 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2989 uint64_t t = (uint64_t)b->u32[i];
2990 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2991 t += a->u8[4*i+j];
2992 }
2993 r->u32[i] = cvtuduw(t, &sat);
2994 }
2995
2996 if (sat) {
2997 env->vscr |= (1 << VSCR_SAT);
2998 }
2999}
3000
79f85c3a
AJ
3001#if defined(WORDS_BIGENDIAN)
3002#define UPKHI 1
3003#define UPKLO 0
3004#else
3005#define UPKHI 0
3006#define UPKLO 1
3007#endif
3008#define VUPKPX(suffix, hi) \
3009 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3010 { \
3011 int i; \
3012 ppc_avr_t result; \
3013 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3014 uint16_t e = b->u16[hi ? i : i+4]; \
3015 uint8_t a = (e >> 15) ? 0xff : 0; \
3016 uint8_t r = (e >> 10) & 0x1f; \
3017 uint8_t g = (e >> 5) & 0x1f; \
3018 uint8_t b = e & 0x1f; \
3019 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3020 } \
3021 *r = result; \
3022 }
3023VUPKPX(lpx, UPKLO)
3024VUPKPX(hpx, UPKHI)
3025#undef VUPKPX
3026
6cf1c6e5
AJ
3027#define VUPK(suffix, unpacked, packee, hi) \
3028 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3029 { \
3030 int i; \
3031 ppc_avr_t result; \
3032 if (hi) { \
3033 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3034 result.unpacked[i] = b->packee[i]; \
3035 } \
3036 } else { \
3037 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3038 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3039 } \
3040 } \
3041 *r = result; \
3042 }
3043VUPK(hsb, s16, s8, UPKHI)
3044VUPK(hsh, s32, s16, UPKHI)
3045VUPK(lsb, s16, s8, UPKLO)
3046VUPK(lsh, s32, s16, UPKLO)
3047#undef VUPK
79f85c3a
AJ
3048#undef UPKHI
3049#undef UPKLO
3050
34ba2857
AJ
3051#undef DO_HANDLE_NAN
3052#undef HANDLE_NAN1
3053#undef HANDLE_NAN2
3054#undef HANDLE_NAN3
d6a46fe8
AJ
3055#undef VECTOR_FOR_INORDER_I
3056#undef HI_IDX
3057#undef LO_IDX
3058
1c97856d 3059/*****************************************************************************/
0487d6a8
JM
3060/* SPE extension helpers */
3061/* Use a table to make this quicker */
3062static uint8_t hbrev[16] = {
3063 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3064 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3065};
3066
b068d6a7 3067static always_inline uint8_t byte_reverse (uint8_t val)
0487d6a8
JM
3068{
3069 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3070}
3071
b068d6a7 3072static always_inline uint32_t word_reverse (uint32_t val)
0487d6a8
JM
3073{
3074 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3075 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3076}
3077
3cd7d1dd 3078#define MASKBITS 16 // Random value - to be fixed (implementation dependent)
57951c27 3079target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
0487d6a8
JM
3080{
3081 uint32_t a, b, d, mask;
3082
3cd7d1dd 3083 mask = UINT32_MAX >> (32 - MASKBITS);
57951c27
AJ
3084 a = arg1 & mask;
3085 b = arg2 & mask;
3cd7d1dd 3086 d = word_reverse(1 + word_reverse(a | ~b));
57951c27 3087 return (arg1 & ~mask) | (d & b);
0487d6a8
JM
3088}
3089
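A worked trace of the bit-reversed increment computed above, with MASKBITS = 16; the operand values are chosen purely for illustration.

/* Example: arg1 = 0x6, arg2 = 0x7 (three significant low bits).
 *   a | ~b                    = 0xFFFFFFFE
 *   word_reverse(a | ~b)      = 0x7FFFFFFF
 *   ... + 1                   = 0x80000000
 *   d = word_reverse(...)     = 0x00000001
 *   (arg1 & ~mask) | (d & b)  = 0x00000001
 * i.e. 110b is bit-reversed to 011b, incremented to 100b, and reversed back
 * to 001b, the next index in bit-reversed (FFT-style) order.
 */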
57951c27 3090uint32_t helper_cntlsw32 (uint32_t val)
0487d6a8
JM
3091{
3092 if (val & 0x80000000)
603fccce 3093 return clz32(~val);
0487d6a8 3094 else
603fccce 3095 return clz32(val);
0487d6a8
JM
3096}
3097
57951c27 3098uint32_t helper_cntlzw32 (uint32_t val)
0487d6a8 3099{
603fccce 3100 return clz32(val);
0487d6a8
JM
3101}
3102
1c97856d
AJ
3103/* Single-precision floating-point conversions */
3104static always_inline uint32_t efscfsi (uint32_t val)
0487d6a8 3105{
0ca9d380 3106 CPU_FloatU u;
0487d6a8 3107
fbd265b6 3108 u.f = int32_to_float32(val, &env->vec_status);
0487d6a8 3109
0ca9d380 3110 return u.l;
0487d6a8
JM
3111}
3112
1c97856d 3113static always_inline uint32_t efscfui (uint32_t val)
0487d6a8 3114{
0ca9d380 3115 CPU_FloatU u;
0487d6a8 3116
fbd265b6 3117 u.f = uint32_to_float32(val, &env->vec_status);
0487d6a8 3118
0ca9d380 3119 return u.l;
0487d6a8
JM
3120}
3121
1c97856d 3122static always_inline int32_t efsctsi (uint32_t val)
0487d6a8 3123{
0ca9d380 3124 CPU_FloatU u;
0487d6a8 3125
0ca9d380 3126 u.l = val;
0487d6a8 3127 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3128 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3129 return 0;
3130
fbd265b6 3131 return float32_to_int32(u.f, &env->vec_status);
0487d6a8
JM
3132}
3133
1c97856d 3134static always_inline uint32_t efsctui (uint32_t val)
0487d6a8 3135{
0ca9d380 3136 CPU_FloatU u;
0487d6a8 3137
0ca9d380 3138 u.l = val;
0487d6a8 3139 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3140 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3141 return 0;
3142
fbd265b6 3143 return float32_to_uint32(u.f, &env->vec_status);
0487d6a8
JM
3144}
3145
1c97856d 3146static always_inline uint32_t efsctsiz (uint32_t val)
0487d6a8 3147{
0ca9d380 3148 CPU_FloatU u;
0487d6a8 3149
0ca9d380 3150 u.l = val;
0487d6a8 3151 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3152 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3153 return 0;
3154
fbd265b6 3155 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
0487d6a8
JM
3156}
3157
1c97856d 3158static always_inline uint32_t efsctuiz (uint32_t val)
0487d6a8 3159{
0ca9d380 3160 CPU_FloatU u;
0487d6a8 3161
0ca9d380 3162 u.l = val;
0487d6a8 3163 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3164 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3165 return 0;
3166
fbd265b6 3167 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
0487d6a8
JM
3168}
3169
1c97856d 3170static always_inline uint32_t efscfsf (uint32_t val)
0487d6a8 3171{
0ca9d380 3172 CPU_FloatU u;
0487d6a8
JM
3173 float32 tmp;
3174
fbd265b6
AJ
3175 u.f = int32_to_float32(val, &env->vec_status);
3176 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3177 u.f = float32_div(u.f, tmp, &env->vec_status);
0487d6a8 3178
0ca9d380 3179 return u.l;
0487d6a8
JM
3180}
3181
1c97856d 3182static always_inline uint32_t efscfuf (uint32_t val)
0487d6a8 3183{
0ca9d380 3184 CPU_FloatU u;
0487d6a8
JM
3185 float32 tmp;
3186
fbd265b6
AJ
3187 u.f = uint32_to_float32(val, &env->vec_status);
3188 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3189 u.f = float32_div(u.f, tmp, &env->vec_status);
0487d6a8 3190
0ca9d380 3191 return u.l;
0487d6a8
JM
3192}
3193
1c97856d 3194static always_inline uint32_t efsctsf (uint32_t val)
0487d6a8 3195{
0ca9d380 3196 CPU_FloatU u;
0487d6a8
JM
3197 float32 tmp;
3198
0ca9d380 3199 u.l = val;
0487d6a8 3200 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3201 if (unlikely(float32_is_nan(u.f)))
0487d6a8 3202 return 0;
fbd265b6
AJ
3203 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3204 u.f = float32_mul(u.f, tmp, &env->vec_status);
0487d6a8 3205
fbd265b6 3206 return float32_to_int32(u.f, &env->vec_status);
0487d6a8
JM
3207}
3208
1c97856d 3209static always_inline uint32_t efsctuf (uint32_t val)
0487d6a8 3210{
0ca9d380 3211 CPU_FloatU u;
0487d6a8
JM
3212 float32 tmp;
3213
0ca9d380 3214 u.l = val;
0487d6a8 3215 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3216 if (unlikely(float32_is_nan(u.f)))
0487d6a8 3217 return 0;
fbd265b6
AJ
3218 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3219 u.f = float32_mul(u.f, tmp, &env->vec_status);
0487d6a8 3220
fbd265b6 3221 return float32_to_uint32(u.f, &env->vec_status);
0487d6a8
JM
3222}
3223
1c97856d
AJ
3224#define HELPER_SPE_SINGLE_CONV(name) \
3225uint32_t helper_e##name (uint32_t val) \
3226{ \
3227 return e##name(val); \
3228}
3229/* efscfsi */
3230HELPER_SPE_SINGLE_CONV(fscfsi);
3231/* efscfui */
3232HELPER_SPE_SINGLE_CONV(fscfui);
3233/* efscfuf */
3234HELPER_SPE_SINGLE_CONV(fscfuf);
3235/* efscfsf */
3236HELPER_SPE_SINGLE_CONV(fscfsf);
3237/* efsctsi */
3238HELPER_SPE_SINGLE_CONV(fsctsi);
3239/* efsctui */
3240HELPER_SPE_SINGLE_CONV(fsctui);
3241/* efsctsiz */
3242HELPER_SPE_SINGLE_CONV(fsctsiz);
3243/* efsctuiz */
3244HELPER_SPE_SINGLE_CONV(fsctuiz);
3245/* efsctsf */
3246HELPER_SPE_SINGLE_CONV(fsctsf);
3247/* efsctuf */
3248HELPER_SPE_SINGLE_CONV(fsctuf);
3249
3250#define HELPER_SPE_VECTOR_CONV(name) \
3251uint64_t helper_ev##name (uint64_t val) \
3252{ \
3253 return ((uint64_t)e##name(val >> 32) << 32) | \
3254 (uint64_t)e##name(val); \
0487d6a8 3255}
1c97856d
AJ
3256/* evfscfsi */
3257HELPER_SPE_VECTOR_CONV(fscfsi);
3258/* evfscfui */
3259HELPER_SPE_VECTOR_CONV(fscfui);
3260/* evfscfuf */
3261HELPER_SPE_VECTOR_CONV(fscfuf);
3262/* evfscfsf */
3263HELPER_SPE_VECTOR_CONV(fscfsf);
3264/* evfsctsi */
3265HELPER_SPE_VECTOR_CONV(fsctsi);
3266/* evfsctui */
3267HELPER_SPE_VECTOR_CONV(fsctui);
3268/* evfsctsiz */
3269HELPER_SPE_VECTOR_CONV(fsctsiz);
3270/* evfsctuiz */
3271HELPER_SPE_VECTOR_CONV(fsctuiz);
3272/* evfsctsf */
3273HELPER_SPE_VECTOR_CONV(fsctsf);
3274/* evfsctuf */
3275HELPER_SPE_VECTOR_CONV(fsctuf);
0487d6a8 3276
1c97856d
AJ
3277/* Single-precision floating-point arithmetic */
3278static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
0487d6a8 3279{
1c97856d
AJ
3280 CPU_FloatU u1, u2;
3281 u1.l = op1;
3282 u2.l = op2;
fbd265b6 3283 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1c97856d 3284 return u1.l;
0487d6a8
JM
3285}
3286
1c97856d 3287static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
0487d6a8 3288{
1c97856d
AJ
3289 CPU_FloatU u1, u2;
3290 u1.l = op1;
3291 u2.l = op2;
fbd265b6 3292 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1c97856d 3293 return u1.l;
0487d6a8
JM
3294}
3295
1c97856d 3296static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
0487d6a8 3297{
1c97856d
AJ
3298 CPU_FloatU u1, u2;
3299 u1.l = op1;
3300 u2.l = op2;
fbd265b6 3301 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1c97856d 3302 return u1.l;
0487d6a8
JM
3303}
3304
1c97856d 3305static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
0487d6a8 3306{
1c97856d
AJ
3307 CPU_FloatU u1, u2;
3308 u1.l = op1;
3309 u2.l = op2;
fbd265b6 3310 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1c97856d 3311 return u1.l;
0487d6a8
JM
3312}
3313
1c97856d
AJ
3314#define HELPER_SPE_SINGLE_ARITH(name) \
3315uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3316{ \
3317 return e##name(op1, op2); \
3318}
3319/* efsadd */
3320HELPER_SPE_SINGLE_ARITH(fsadd);
3321/* efssub */
3322HELPER_SPE_SINGLE_ARITH(fssub);
3323/* efsmul */
3324HELPER_SPE_SINGLE_ARITH(fsmul);
3325/* efsdiv */
3326HELPER_SPE_SINGLE_ARITH(fsdiv);
3327
3328#define HELPER_SPE_VECTOR_ARITH(name) \
3329uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3330{ \
3331 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3332 (uint64_t)e##name(op1, op2); \
3333}
3334/* evfsadd */
3335HELPER_SPE_VECTOR_ARITH(fsadd);
3336/* evfssub */
3337HELPER_SPE_VECTOR_ARITH(fssub);
3338/* evfsmul */
3339HELPER_SPE_VECTOR_ARITH(fsmul);
3340/* evfsdiv */
3341HELPER_SPE_VECTOR_ARITH(fsdiv);
3342
3343/* Single-precision floating-point comparisons */
3344static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
0487d6a8 3345{
1c97856d
AJ
3346 CPU_FloatU u1, u2;
3347 u1.l = op1;
3348 u2.l = op2;
fbd265b6 3349 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3350}
3351
1c97856d 3352static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
0487d6a8 3353{
1c97856d
AJ
3354 CPU_FloatU u1, u2;
3355 u1.l = op1;
3356 u2.l = op2;
fbd265b6 3357 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
0487d6a8
JM
3358}
3359
1c97856d 3360static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
0487d6a8 3361{
1c97856d
AJ
3362 CPU_FloatU u1, u2;
3363 u1.l = op1;
3364 u2.l = op2;
fbd265b6 3365 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3366}
3367
1c97856d 3368static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
0487d6a8
JM
3369{
3370 /* XXX: TODO: test special values (NaN, infinites, ...) */
1c97856d 3371 return efststlt(op1, op2);
0487d6a8
JM
3372}
3373
1c97856d 3374static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
0487d6a8
JM
3375{
3376 /* XXX: TODO: test special values (NaN, infinites, ...) */
1c97856d 3377 return efststgt(op1, op2);
0487d6a8
JM
3378}
3379
1c97856d 3380static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
0487d6a8
JM
3381{
3382 /* XXX: TODO: test special values (NaN, infinites, ...) */
1c97856d 3383 return efststeq(op1, op2);
0487d6a8
JM
3384}
3385
1c97856d
AJ
3386#define HELPER_SINGLE_SPE_CMP(name) \
3387uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3388{ \
3389 return e##name(op1, op2) << 2; \
3390}
3391/* efststlt */
3392HELPER_SINGLE_SPE_CMP(fststlt);
3393/* efststgt */
3394HELPER_SINGLE_SPE_CMP(fststgt);
3395/* efststeq */
3396HELPER_SINGLE_SPE_CMP(fststeq);
3397/* efscmplt */
3398HELPER_SINGLE_SPE_CMP(fscmplt);
3399/* efscmpgt */
3400HELPER_SINGLE_SPE_CMP(fscmpgt);
3401/* efscmpeq */
3402HELPER_SINGLE_SPE_CMP(fscmpeq);
3403
3404static always_inline uint32_t evcmp_merge (int t0, int t1)
0487d6a8 3405{
1c97856d 3406 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
0487d6a8
JM
3407}
3408
1c97856d
AJ
3409#define HELPER_VECTOR_SPE_CMP(name) \
3410uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3411{ \
3412 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
0487d6a8 3413}
1c97856d
AJ
3414/* evfststlt */
3415HELPER_VECTOR_SPE_CMP(fststlt);
3416/* evfststgt */
3417HELPER_VECTOR_SPE_CMP(fststgt);
3418/* evfststeq */
3419HELPER_VECTOR_SPE_CMP(fststeq);
3420/* evfscmplt */
3421HELPER_VECTOR_SPE_CMP(fscmplt);
3422/* evfscmpgt */
3423HELPER_VECTOR_SPE_CMP(fscmpgt);
3424/* evfscmpeq */
3425HELPER_VECTOR_SPE_CMP(fscmpeq);
0487d6a8 3426
1c97856d
AJ
3427/* Double-precision floating-point conversion */
3428uint64_t helper_efdcfsi (uint32_t val)
0487d6a8 3429{
1c97856d
AJ
3430 CPU_DoubleU u;
3431
fbd265b6 3432 u.d = int32_to_float64(val, &env->vec_status);
1c97856d
AJ
3433
3434 return u.ll;
0487d6a8
JM
3435}
3436
1c97856d 3437uint64_t helper_efdcfsid (uint64_t val)
0487d6a8 3438{
0ca9d380 3439 CPU_DoubleU u;
0487d6a8 3440
fbd265b6 3441 u.d = int64_to_float64(val, &env->vec_status);
0487d6a8 3442
0ca9d380 3443 return u.ll;
0487d6a8
JM
3444}
3445
1c97856d
AJ
3446uint64_t helper_efdcfui (uint32_t val)
3447{
3448 CPU_DoubleU u;
3449
fbd265b6 3450 u.d = uint32_to_float64(val, &env->vec_status);
1c97856d
AJ
3451
3452 return u.ll;
3453}
3454
3455uint64_t helper_efdcfuid (uint64_t val)
0487d6a8 3456{
0ca9d380 3457 CPU_DoubleU u;
0487d6a8 3458
fbd265b6 3459 u.d = uint64_to_float64(val, &env->vec_status);
0487d6a8 3460
0ca9d380 3461 return u.ll;
0487d6a8
JM
3462}
3463
1c97856d 3464uint32_t helper_efdctsi (uint64_t val)
0487d6a8 3465{
0ca9d380 3466 CPU_DoubleU u;
0487d6a8 3467
0ca9d380 3468 u.ll = val;
0487d6a8 3469 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3470 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3471 return 0;
3472
fbd265b6 3473 return float64_to_int32(u.d, &env->vec_status);
0487d6a8
JM
3474}
3475
1c97856d 3476uint32_t helper_efdctui (uint64_t val)
0487d6a8 3477{
0ca9d380 3478 CPU_DoubleU u;
0487d6a8 3479
0ca9d380 3480 u.ll = val;
0487d6a8 3481 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3482 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3483 return 0;
3484
fbd265b6 3485 return float64_to_uint32(u.d, &env->vec_status);
0487d6a8
JM
3486}
3487
1c97856d 3488uint32_t helper_efdctsiz (uint64_t val)
0487d6a8 3489{
0ca9d380 3490 CPU_DoubleU u;
0487d6a8 3491
0ca9d380 3492 u.ll = val;
0487d6a8 3493 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3494 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3495 return 0;
3496
fbd265b6 3497 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3498}
3499
1c97856d 3500uint64_t helper_efdctsidz (uint64_t val)
0487d6a8 3501{
0ca9d380 3502 CPU_DoubleU u;
0487d6a8 3503
0ca9d380 3504 u.ll = val;
0487d6a8 3505 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3506 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3507 return 0;
3508
fbd265b6 3509 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3510}
3511
1c97856d 3512uint32_t helper_efdctuiz (uint64_t val)
0487d6a8 3513{
1c97856d 3514 CPU_DoubleU u;
0487d6a8 3515
1c97856d
AJ
3516 u.ll = val;
3517 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3518 if (unlikely(float64_is_nan(u.d)))
1c97856d 3519 return 0;
0487d6a8 3520
fbd265b6 3521 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3522}
3523
1c97856d 3524uint64_t helper_efdctuidz (uint64_t val)
0487d6a8 3525{
1c97856d 3526 CPU_DoubleU u;
0487d6a8 3527
1c97856d
AJ
3528 u.ll = val;
3529 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3530 if (unlikely(float64_is_nan(u.d)))
1c97856d 3531 return 0;
0487d6a8 3532
fbd265b6 3533 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3534}
3535
1c97856d 3536uint64_t helper_efdcfsf (uint32_t val)
0487d6a8 3537{
0ca9d380 3538 CPU_DoubleU u;
0487d6a8
JM
3539 float64 tmp;
3540
fbd265b6
AJ
3541 u.d = int32_to_float64(val, &env->vec_status);
3542 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3543 u.d = float64_div(u.d, tmp, &env->vec_status);
0487d6a8 3544
0ca9d380 3545 return u.ll;
0487d6a8
JM
3546}
3547
1c97856d 3548uint64_t helper_efdcfuf (uint32_t val)
0487d6a8 3549{
0ca9d380 3550 CPU_DoubleU u;
0487d6a8
JM
3551 float64 tmp;
3552
fbd265b6
AJ
3553 u.d = uint32_to_float64(val, &env->vec_status);
3554 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3555 u.d = float64_div(u.d, tmp, &env->vec_status);
0487d6a8 3556
0ca9d380 3557 return u.ll;
0487d6a8
JM
3558}
3559
1c97856d 3560uint32_t helper_efdctsf (uint64_t val)
0487d6a8 3561{
0ca9d380 3562 CPU_DoubleU u;
0487d6a8
JM
3563 float64 tmp;
3564
0ca9d380 3565 u.ll = val;
0487d6a8 3566 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3567 if (unlikely(float64_is_nan(u.d)))
0487d6a8 3568 return 0;
fbd265b6
AJ
3569 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3570 u.d = float64_mul(u.d, tmp, &env->vec_status);
0487d6a8 3571
fbd265b6 3572 return float64_to_int32(u.d, &env->vec_status);
0487d6a8
JM
3573}
3574
1c97856d 3575uint32_t helper_efdctuf (uint64_t val)
0487d6a8 3576{
0ca9d380 3577 CPU_DoubleU u;
0487d6a8
JM
3578 float64 tmp;
3579
0ca9d380 3580 u.ll = val;
0487d6a8 3581 /* NaN are not treated the same way IEEE 754 does */
a44d2ce1 3582 if (unlikely(float64_is_nan(u.d)))
0487d6a8 3583 return 0;
fbd265b6
AJ
3584 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3585 u.d = float64_mul(u.d, tmp, &env->vec_status);
0487d6a8 3586
fbd265b6 3587 return float64_to_uint32(u.d, &env->vec_status);
0487d6a8
JM
3588}
3589
1c97856d 3590uint32_t helper_efscfd (uint64_t val)
0487d6a8 3591{
0ca9d380
AJ
3592 CPU_DoubleU u1;
3593 CPU_FloatU u2;
0487d6a8 3594
0ca9d380 3595 u1.ll = val;
fbd265b6 3596 u2.f = float64_to_float32(u1.d, &env->vec_status);
0487d6a8 3597
0ca9d380 3598 return u2.l;
0487d6a8
JM
3599}
3600
1c97856d 3601uint64_t helper_efdcfs (uint32_t val)
0487d6a8 3602{
0ca9d380
AJ
3603 CPU_DoubleU u2;
3604 CPU_FloatU u1;
0487d6a8 3605
0ca9d380 3606 u1.l = val;
fbd265b6 3607 u2.d = float32_to_float64(u1.f, &env->vec_status);
0487d6a8 3608
0ca9d380 3609 return u2.ll;
0487d6a8
JM
3610}
3611
1c97856d
AJ
 3612/* Double-precision floating-point arithmetic */
3613uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
0487d6a8 3614{
1c97856d
AJ
3615 CPU_DoubleU u1, u2;
3616 u1.ll = op1;
3617 u2.ll = op2;
fbd265b6 3618 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1c97856d 3619 return u1.ll;
0487d6a8
JM
3620}
3621
1c97856d 3622uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
0487d6a8 3623{
1c97856d
AJ
3624 CPU_DoubleU u1, u2;
3625 u1.ll = op1;
3626 u2.ll = op2;
fbd265b6 3627 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1c97856d 3628 return u1.ll;
0487d6a8
JM
3629}
3630
1c97856d 3631uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
0487d6a8 3632{
1c97856d
AJ
3633 CPU_DoubleU u1, u2;
3634 u1.ll = op1;
3635 u2.ll = op2;
fbd265b6 3636 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1c97856d 3637 return u1.ll;
0487d6a8
JM
3638}
3639
1c97856d 3640uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
0487d6a8 3641{
1c97856d
AJ
3642 CPU_DoubleU u1, u2;
3643 u1.ll = op1;
3644 u2.ll = op2;
fbd265b6 3645 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1c97856d 3646 return u1.ll;
0487d6a8
JM
3647}
3648
1c97856d
AJ
 3649/* Double-precision floating-point comparisons */
3650uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
0487d6a8 3651{
1c97856d
AJ
3652 CPU_DoubleU u1, u2;
3653 u1.ll = op1;
3654 u2.ll = op2;
fbd265b6 3655 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3656}
3657
1c97856d 3658uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
0487d6a8 3659{
1c97856d
AJ
3660 CPU_DoubleU u1, u2;
3661 u1.ll = op1;
3662 u2.ll = op2;
fbd265b6 3663 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
0487d6a8
JM
3664}
3665
1c97856d 3666uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
0487d6a8 3667{
1c97856d
AJ
3668 CPU_DoubleU u1, u2;
3669 u1.ll = op1;
3670 u2.ll = op2;
fbd265b6 3671 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3672}
3673
1c97856d 3674uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
0487d6a8 3675{
1c97856d
AJ
3676 /* XXX: TODO: test special values (NaN, infinities, ...) */
3677 return helper_efdtstlt(op1, op2);
0487d6a8
JM
3678}
3679
1c97856d
AJ
3680uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3681{
3682 /* XXX: TODO: test special values (NaN, infinities, ...) */
3683 return helper_efdtstgt(op1, op2);
3684}
0487d6a8 3685
1c97856d
AJ
3686uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3687{
3688 /* XXX: TODO: test special values (NaN, infinities, ...) */
3689 return helper_efdtsteq(op1, op2);
3690}
0487d6a8 3691
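/* Hedged sketch, not part of the original file: the XXX comments above note
 * that special values are not handled yet.  One possible shape, reusing the
 * float64_is_nan() check already used by efdctsf/efdctuf, is to report an
 * unordered compare (result 0) when either operand is a NaN and otherwise
 * fall back on the tst helper.  This illustrates the idea only; it is not
 * the implemented behaviour. */
static inline uint32_t efd_cmplt_nan_aware_sketch(uint64_t op1, uint64_t op2)
{
    CPU_DoubleU u1, u2;
    u1.ll = op1;
    u2.ll = op2;
    if (float64_is_nan(u1.d) || float64_is_nan(u2.d)) {
        return 0;
    }
    return helper_efdtstlt(op1, op2);
}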
fdabc366
FB
3692/*****************************************************************************/
3693/* Softmmu support */
3694#if !defined (CONFIG_USER_ONLY)
3695
3696#define MMUSUFFIX _mmu
fdabc366
FB
3697
3698#define SHIFT 0
3699#include "softmmu_template.h"
3700
3701#define SHIFT 1
3702#include "softmmu_template.h"
3703
3704#define SHIFT 2
3705#include "softmmu_template.h"
3706
3707#define SHIFT 3
3708#include "softmmu_template.h"
3709
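/* Added note, not part of the original file: including softmmu_template.h
 * once per SHIFT value instantiates the slow-path load/store handlers for
 * this MMUSUFFIX; SHIFT is the log2 of the access size, so the four
 * inclusions above cover 1-, 2-, 4- and 8-byte accesses. */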
3710/* Try to fill the TLB and raise an exception on error.  If retaddr is
3711   NULL, it means that the function was called from C code (i.e. not
3712   from generated code or from helper.c). */
3713/* XXX: fix it to restore all registers */
6ebbf390 3714void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
fdabc366
FB
3715{
3716 TranslationBlock *tb;
3717 CPUState *saved_env;
44f8625d 3718 unsigned long pc;
fdabc366
FB
3719 int ret;
3720
3721 /* XXX: hack to restore env in all cases, even if not called from
3722 generated code */
3723 saved_env = env;
3724 env = cpu_single_env;
6ebbf390 3725 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
76a66253 3726 if (unlikely(ret != 0)) {
fdabc366
FB
3727 if (likely(retaddr)) {
3728 /* now we have a real cpu fault */
44f8625d 3729 pc = (unsigned long)retaddr;
fdabc366
FB
3730 tb = tb_find_pc(pc);
3731 if (likely(tb)) {
3732 /* the PC is inside the translated code. It means that we have
3733 a virtual CPU fault */
3734 cpu_restore_state(tb, env, pc, NULL);
76a66253 3735 }
fdabc366 3736 }
e06fcd75 3737 helper_raise_exception_err(env->exception_index, env->error_code);
fdabc366
FB
3738 }
3739 env = saved_env;
9a64fbe4
FB
3740}
3741
74d37793
AJ
3742/* Segment registers load and store */
3743target_ulong helper_load_sr (target_ulong sr_num)
3744{
f6b868fc
BS
3745#if defined(TARGET_PPC64)
3746 if (env->mmu_model & POWERPC_MMU_64)
3747 return ppc_load_sr(env, sr_num);
3748#endif
74d37793
AJ
3749 return env->sr[sr_num];
3750}
3751
3752void helper_store_sr (target_ulong sr_num, target_ulong val)
3753{
45d827d2 3754 ppc_store_sr(env, sr_num, val);
74d37793
AJ
3755}
3756
3757/* SLB management */
3758#if defined(TARGET_PPC64)
3759target_ulong helper_load_slb (target_ulong slb_nr)
3760{
3761 return ppc_load_slb(env, slb_nr);
3762}
3763
f6b868fc 3764void helper_store_slb (target_ulong rb, target_ulong rs)
74d37793 3765{
f6b868fc 3766 ppc_store_slb(env, rb, rs);
74d37793
AJ
3767}
3768
3769void helper_slbia (void)
3770{
3771 ppc_slb_invalidate_all(env);
3772}
3773
3774void helper_slbie (target_ulong addr)
3775{
3776 ppc_slb_invalidate_one(env, addr);
3777}
3778
3779#endif /* defined(TARGET_PPC64) */
3780
3781/* TLB management */
3782void helper_tlbia (void)
3783{
3784 ppc_tlb_invalidate_all(env);
3785}
3786
3787void helper_tlbie (target_ulong addr)
3788{
3789 ppc_tlb_invalidate_one(env, addr);
3790}
3791
76a66253
JM
3792/* Software-driven TLB management */
3793/* PowerPC 602/603 software TLB load instruction helpers */
74d37793 3794static void do_6xx_tlb (target_ulong new_EPN, int is_code)
76a66253
JM
3795{
3796 target_ulong RPN, CMP, EPN;
3797 int way;
d9bce9d9 3798
76a66253
JM
3799 RPN = env->spr[SPR_RPA];
3800 if (is_code) {
3801 CMP = env->spr[SPR_ICMP];
3802 EPN = env->spr[SPR_IMISS];
3803 } else {
3804 CMP = env->spr[SPR_DCMP];
3805 EPN = env->spr[SPR_DMISS];
3806 }
3807 way = (env->spr[SPR_SRR1] >> 17) & 1;
d12d51d5 3808 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
6b542af7 3809 " PTE1 " ADDRX " way %d\n",
0e69805a 3810 __func__, new_EPN, EPN, CMP, RPN, way);
76a66253 3811 /* Store this TLB */
0f3955e2 3812 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
d9bce9d9 3813 way, is_code, CMP, RPN);
76a66253
JM
3814}
3815
74d37793 3816void helper_6xx_tlbd (target_ulong EPN)
0f3955e2 3817{
74d37793 3818 do_6xx_tlb(EPN, 0);
0f3955e2
AJ
3819}
3820
74d37793 3821void helper_6xx_tlbi (target_ulong EPN)
0f3955e2 3822{
74d37793 3823 do_6xx_tlb(EPN, 1);
0f3955e2
AJ
3824}
3825
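/* Added note, not part of the original file: on a 602/603-style software TLB
 * miss the CPU latches the faulting address in IMISS or DMISS, the matching
 * PTE compare word in ICMP or DCMP, and the reload way in SRR1 (bit 17 as
 * extracted above); do_6xx_tlb() simply copies those SPRs, plus the RPA
 * register, into the selected way of the QEMU-managed TLB via
 * ppc6xx_tlb_store(). */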
3826/* PowerPC 74xx software TLB load instruction helpers */
74d37793 3827static void do_74xx_tlb (target_ulong new_EPN, int is_code)
7dbe11ac
JM
3828{
3829 target_ulong RPN, CMP, EPN;
3830 int way;
3831
3832 RPN = env->spr[SPR_PTELO];
3833 CMP = env->spr[SPR_PTEHI];
3834 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3835 way = env->spr[SPR_TLBMISS] & 0x3;
d12d51d5 3836 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
6b542af7 3837 " PTE1 " ADDRX " way %d\n",
0e69805a 3838 __func__, new_EPN, EPN, CMP, RPN, way);
7dbe11ac 3839 /* Store this TLB */
0f3955e2 3840 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
7dbe11ac
JM
3841 way, is_code, CMP, RPN);
3842}
3843
74d37793 3844void helper_74xx_tlbd (target_ulong EPN)
0f3955e2 3845{
74d37793 3846 do_74xx_tlb(EPN, 0);
0f3955e2
AJ
3847}
3848
74d37793 3849void helper_74xx_tlbi (target_ulong EPN)
0f3955e2 3850{
74d37793 3851 do_74xx_tlb(EPN, 1);
0f3955e2
AJ
3852}
3853
a11b8151 3854static always_inline target_ulong booke_tlb_to_page_size (int size)
a8dea12f
JM
3855{
3856 return 1024 << (2 * size);
3857}
3858
a11b8151 3859static always_inline int booke_page_size_to_tlb (target_ulong page_size)
a8dea12f
JM
3860{
3861 int size;
3862
3863 switch (page_size) {
3864 case 0x00000400UL:
3865 size = 0x0;
3866 break;
3867 case 0x00001000UL:
3868 size = 0x1;
3869 break;
3870 case 0x00004000UL:
3871 size = 0x2;
3872 break;
3873 case 0x00010000UL:
3874 size = 0x3;
3875 break;
3876 case 0x00040000UL:
3877 size = 0x4;
3878 break;
3879 case 0x00100000UL:
3880 size = 0x5;
3881 break;
3882 case 0x00400000UL:
3883 size = 0x6;
3884 break;
3885 case 0x01000000UL:
3886 size = 0x7;
3887 break;
3888 case 0x04000000UL:
3889 size = 0x8;
3890 break;
3891 case 0x10000000UL:
3892 size = 0x9;
3893 break;
3894 case 0x40000000UL:
3895 size = 0xA;
3896 break;
3897#if defined (TARGET_PPC64)
3898 case 0x000100000000ULL:
3899 size = 0xB;
3900 break;
3901 case 0x000400000000ULL:
3902 size = 0xC;
3903 break;
3904 case 0x001000000000ULL:
3905 size = 0xD;
3906 break;
3907 case 0x004000000000ULL:
3908 size = 0xE;
3909 break;
3910 case 0x010000000000ULL:
3911 size = 0xF;
3912 break;
3913#endif
3914 default:
3915 size = -1;
3916 break;
3917 }
3918
3919 return size;
3920}
3921
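/* Added note, not part of the original file: the BookE size field encodes a
 * page of 1 KiB << (2 * size), so the two conversions above are inverses for
 * the sizes listed in the switch.  For example booke_tlb_to_page_size(2) is
 * 16384 (16 KiB) and booke_page_size_to_tlb(0x00100000UL) is 0x5 (1 MiB);
 * any size not in the table maps to -1. */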
76a66253 3922/* Helpers for 4xx TLB management */
74d37793 3923target_ulong helper_4xx_tlbre_lo (target_ulong entry)
76a66253 3924{
a8dea12f 3925 ppcemb_tlb_t *tlb;
74d37793 3926 target_ulong ret;
a8dea12f 3927 int size;
76a66253 3928
74d37793
AJ
3929 entry &= 0x3F;
3930 tlb = &env->tlb[entry].tlbe;
3931 ret = tlb->EPN;
a8dea12f 3932 if (tlb->prot & PAGE_VALID)
74d37793 3933 ret |= 0x40;
a8dea12f
JM
3934 size = booke_page_size_to_tlb(tlb->size);
3935 if (size < 0 || size > 0x7)
3936 size = 1;
74d37793 3937 ret |= size << 7;
a8dea12f 3938 env->spr[SPR_40x_PID] = tlb->PID;
74d37793 3939 return ret;
76a66253
JM
3940}
3941
74d37793 3942target_ulong helper_4xx_tlbre_hi (target_ulong entry)
76a66253 3943{
a8dea12f 3944 ppcemb_tlb_t *tlb;
74d37793 3945 target_ulong ret;
76a66253 3946
74d37793
AJ
3947 entry &= 0x3F;
3948 tlb = &env->tlb[entry].tlbe;
3949 ret = tlb->RPN;
a8dea12f 3950 if (tlb->prot & PAGE_EXEC)
74d37793 3951 ret |= 0x200;
a8dea12f 3952 if (tlb->prot & PAGE_WRITE)
74d37793
AJ
3953 ret |= 0x100;
3954 return ret;
76a66253
JM
3955}
3956
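/* Hypothetical named masks, added for clarity and not used by the file,
 * covering the magic numbers in the 4xx TLB helpers above and below.  The
 * EPN word carries EPN | SIZE << 7 | V (the same 0x40 valid flag tested by
 * helper_4xx_tlbwe_hi()), and the RPN word carries RPN | EX | WR. */
enum {
    PPC4XX_TLB_V    = 0x040, /* entry valid            */
    PPC4XX_TLB_SIZE = 0x380, /* size field, bits 7..9  */
    PPC4XX_TLB_EX   = 0x200, /* execute permission     */
    PPC4XX_TLB_WR   = 0x100, /* write permission       */
};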
74d37793 3957void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
76a66253 3958{
a8dea12f 3959 ppcemb_tlb_t *tlb;
76a66253
JM
3960 target_ulong page, end;
3961
d12d51d5 3962 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
74d37793
AJ
3963 entry &= 0x3F;
3964 tlb = &env->tlb[entry].tlbe;
76a66253
JM
3965 /* Invalidate previous TLB (if it's valid) */
3966 if (tlb->prot & PAGE_VALID) {
3967 end = tlb->EPN + tlb->size;
d12d51d5 3968 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
74d37793 3969 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
76a66253
JM
3970 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3971 tlb_flush_page(env, page);
3972 }
74d37793 3973 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
c294fc58
JM
3974 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3975 * If this ever occurs, one should use the ppcemb target instead
3976 * of the ppc or ppc64 one
3977 */
74d37793 3978 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
71c8b8fd
JM
3979 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3980 "are not supported (%d)\n",
74d37793 3981 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
c294fc58 3982 }
74d37793
AJ
3983 tlb->EPN = val & ~(tlb->size - 1);
3984 if (val & 0x40)
76a66253
JM
3985 tlb->prot |= PAGE_VALID;
3986 else
3987 tlb->prot &= ~PAGE_VALID;
74d37793 3988 if (val & 0x20) {
c294fc58
JM
3989 /* XXX: TO BE FIXED */
3990 cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
3991 }
c55e9aef 3992 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
74d37793 3993 tlb->attr = val & 0xFF;
d12d51d5 3994 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
c55e9aef 3995 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
0e69805a 3996 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
c55e9aef
JM
3997 tlb->prot & PAGE_READ ? 'r' : '-',
3998 tlb->prot & PAGE_WRITE ? 'w' : '-',
3999 tlb->prot & PAGE_EXEC ? 'x' : '-',
4000 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
76a66253
JM
4001 /* Invalidate new TLB (if valid) */
4002 if (tlb->prot & PAGE_VALID) {
4003 end = tlb->EPN + tlb->size;
d12d51d5 4004 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
0e69805a 4005 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
76a66253
JM
4006 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
4007 tlb_flush_page(env, page);
4008 }
76a66253
JM
4009}
4010
74d37793 4011void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
76a66253 4012{
a8dea12f 4013 ppcemb_tlb_t *tlb;
76a66253 4014
d12d51d5 4015 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
74d37793
AJ
4016 entry &= 0x3F;
4017 tlb = &env->tlb[entry].tlbe;
4018 tlb->RPN = val & 0xFFFFFC00;
76a66253 4019 tlb->prot = PAGE_READ;
74d37793 4020 if (val & 0x200)
76a66253 4021 tlb->prot |= PAGE_EXEC;
74d37793 4022 if (val & 0x100)
76a66253 4023 tlb->prot |= PAGE_WRITE;
d12d51d5 4024 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
c55e9aef 4025 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
74d37793 4026 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
c55e9aef
JM
4027 tlb->prot & PAGE_READ ? 'r' : '-',
4028 tlb->prot & PAGE_WRITE ? 'w' : '-',
4029 tlb->prot & PAGE_EXEC ? 'x' : '-',
4030 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
76a66253 4031}
5eb7995e 4032
74d37793
AJ
4033target_ulong helper_4xx_tlbsx (target_ulong address)
4034{
4035 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4036}
4037
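/* Added note, not part of the original file: tlbsx resolves an effective
 * address against the current 40x PID; ppcemb_tlb_search(), implemented
 * alongside the MMU code, returns the index of the matching TLB entry, or
 * -1 when no entry matches, and that value ends up in the destination
 * register. */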
a4bb6c3e 4038/* PowerPC 440 TLB management */
74d37793 4039void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
5eb7995e
JM
4040{
4041 ppcemb_tlb_t *tlb;
a4bb6c3e 4042 target_ulong EPN, RPN, size;
5eb7995e
JM
4043 int do_flush_tlbs;
4044
d12d51d5 4045 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
0e69805a 4046 __func__, word, (int)entry, value);
5eb7995e 4047 do_flush_tlbs = 0;
74d37793
AJ
4048 entry &= 0x3F;
4049 tlb = &env->tlb[entry].tlbe;
a4bb6c3e
JM
4050 switch (word) {
4051 default:
4052 /* Just here to please gcc */
4053 case 0:
74d37793 4054 EPN = value & 0xFFFFFC00;
a4bb6c3e 4055 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
5eb7995e 4056 do_flush_tlbs = 1;
a4bb6c3e 4057 tlb->EPN = EPN;
74d37793 4058 size = booke_tlb_to_page_size((value >> 4) & 0xF);
a4bb6c3e
JM
4059 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4060 do_flush_tlbs = 1;
4061 tlb->size = size;
4062 tlb->attr &= ~0x1;
74d37793
AJ
4063 tlb->attr |= (value >> 8) & 1;
4064 if (value & 0x200) {
a4bb6c3e
JM
4065 tlb->prot |= PAGE_VALID;
4066 } else {
4067 if (tlb->prot & PAGE_VALID) {
4068 tlb->prot &= ~PAGE_VALID;
4069 do_flush_tlbs = 1;
4070 }
5eb7995e 4071 }
a4bb6c3e
JM
4072 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4073 if (do_flush_tlbs)
4074 tlb_flush(env, 1);
4075 break;
4076 case 1:
74d37793 4077 RPN = value & 0xFFFFFC0F;
a4bb6c3e
JM
4078 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4079 tlb_flush(env, 1);
4080 tlb->RPN = RPN;
4081 break;
4082 case 2:
74d37793 4083 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
a4bb6c3e 4084 tlb->prot = tlb->prot & PAGE_VALID;
74d37793 4085 if (value & 0x1)
a4bb6c3e 4086 tlb->prot |= PAGE_READ << 4;
74d37793 4087 if (value & 0x2)
a4bb6c3e 4088 tlb->prot |= PAGE_WRITE << 4;
74d37793 4089 if (value & 0x4)
a4bb6c3e 4090 tlb->prot |= PAGE_EXEC << 4;
74d37793 4091 if (value & 0x8)
a4bb6c3e 4092 tlb->prot |= PAGE_READ;
74d37793 4093 if (value & 0x10)
a4bb6c3e 4094 tlb->prot |= PAGE_WRITE;
74d37793 4095 if (value & 0x20)
a4bb6c3e
JM
4096 tlb->prot |= PAGE_EXEC;
4097 break;
5eb7995e 4098 }
5eb7995e
JM
4099}
4100
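/* Added note, not part of the original file: the 440 TLB is programmed as
 * three words per entry, which is why helper_440_tlbwe() above and
 * helper_440_tlbre() below dispatch on 'word': word 0 holds the EPN, the
 * 4-bit size field and the valid bit (0x200), word 1 holds the RPN, and
 * word 2 holds the storage attribute bits plus two permission sets, the
 * second of which is kept shifted left by 4 in tlb->prot. */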
74d37793 4101target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
5eb7995e
JM
4102{
4103 ppcemb_tlb_t *tlb;
74d37793 4104 target_ulong ret;
5eb7995e
JM
4105 int size;
4106
74d37793
AJ
4107 entry &= 0x3F;
4108 tlb = &env->tlb[entry].tlbe;
a4bb6c3e
JM
4109 switch (word) {
4110 default:
4111 /* Just here to please gcc */
4112 case 0:
74d37793 4113 ret = tlb->EPN;
a4bb6c3e
JM
4114 size = booke_page_size_to_tlb(tlb->size);
4115 if (size < 0 || size > 0xF)
4116 size = 1;
74d37793 4117 ret |= size << 4;
a4bb6c3e 4118 if (tlb->attr & 0x1)
74d37793 4119 ret |= 0x100;
a4bb6c3e 4120 if (tlb->prot & PAGE_VALID)
74d37793 4121 ret |= 0x200;
a4bb6c3e
JM
4122 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4123 env->spr[SPR_440_MMUCR] |= tlb->PID;
4124 break;
4125 case 1:
74d37793 4126 ret = tlb->RPN;
a4bb6c3e
JM
4127 break;
4128 case 2:
74d37793 4129 ret = tlb->attr & ~0x1;
a4bb6c3e 4130 if (tlb->prot & (PAGE_READ << 4))
74d37793 4131 ret |= 0x1;
a4bb6c3e 4132 if (tlb->prot & (PAGE_WRITE << 4))
74d37793 4133 ret |= 0x2;
a4bb6c3e 4134 if (tlb->prot & (PAGE_EXEC << 4))
74d37793 4135 ret |= 0x4;
a4bb6c3e 4136 if (tlb->prot & PAGE_READ)
74d37793 4137 ret |= 0x8;
a4bb6c3e 4138 if (tlb->prot & PAGE_WRITE)
74d37793 4139 ret |= 0x10;
a4bb6c3e 4140 if (tlb->prot & PAGE_EXEC)
74d37793 4141 ret |= 0x20;
a4bb6c3e
JM
4142 break;
4143 }
74d37793 4144 return ret;
5eb7995e 4145}
74d37793
AJ
4146
4147target_ulong helper_440_tlbsx (target_ulong address)
4148{
4149 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4150}
4151
76a66253 4152#endif /* !CONFIG_USER_ONLY */