9a64fbe4 1/*
3fc6c082 2 * PowerPC emulation helpers for qemu.
5fafdf24 3 *
76a66253 4 * Copyright (c) 2003-2007 Jocelyn Mayer
9a64fbe4
FB
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
9a64fbe4 18 */
7b239bec 19#include <string.h>
9a64fbe4 20#include "exec.h"
603fccce 21#include "host-utils.h"
a7812ae4 22#include "helper.h"
9a64fbe4 23
0411a972 24#include "helper_regs.h"
0487d6a8 25
fdabc366
FB
26//#define DEBUG_OP
27//#define DEBUG_EXCEPTIONS
76a66253 28//#define DEBUG_SOFTWARE_TLB
fdabc366 29
d12d51d5 30#ifdef DEBUG_SOFTWARE_TLB
93fcfe39 31# define LOG_SWTLB(...) qemu_log(__VA_ARGS__)
d12d51d5
AL
32#else
33# define LOG_SWTLB(...) do { } while (0)
34#endif
35
36
9a64fbe4
FB
37/*****************************************************************************/
38/* Exceptions processing helpers */
9a64fbe4 39
64adab3f 40void helper_raise_exception_err (uint32_t exception, uint32_t error_code)
9a64fbe4 41{
e06fcd75
AJ
42#if 0
43 printf("Raise exception %3x code : %d\n", exception, error_code);
44#endif
45 env->exception_index = exception;
46 env->error_code = error_code;
47 cpu_loop_exit();
76a66253 48}
9fddaa0c 49
e06fcd75 50void helper_raise_exception (uint32_t exception)
9fddaa0c 51{
e06fcd75 52 helper_raise_exception_err(exception, 0);
9a64fbe4
FB
53}
54
45d827d2
AJ
55/*****************************************************************************/
56/* SPR accesses */
57void helper_load_dump_spr (uint32_t sprn)
a496775f 58{
93fcfe39 59 qemu_log("Read SPR %d %03x => " ADDRX "\n",
a496775f 60 sprn, sprn, env->spr[sprn]);
a496775f
JM
61}
62
45d827d2 63void helper_store_dump_spr (uint32_t sprn)
a496775f 64{
93fcfe39 65 qemu_log("Write SPR %d %03x <= " ADDRX "\n",
45d827d2 66 sprn, sprn, env->spr[sprn]);
45d827d2
AJ
67}
68
69target_ulong helper_load_tbl (void)
70{
71 return cpu_ppc_load_tbl(env);
72}
73
74target_ulong helper_load_tbu (void)
75{
76 return cpu_ppc_load_tbu(env);
77}
78
79target_ulong helper_load_atbl (void)
80{
81 return cpu_ppc_load_atbl(env);
82}
83
84target_ulong helper_load_atbu (void)
85{
86 return cpu_ppc_load_atbu(env);
87}
88
89target_ulong helper_load_601_rtcl (void)
90{
91 return cpu_ppc601_load_rtcl(env);
92}
93
94target_ulong helper_load_601_rtcu (void)
95{
96 return cpu_ppc601_load_rtcu(env);
97}
98
99#if !defined(CONFIG_USER_ONLY)
100#if defined (TARGET_PPC64)
101void helper_store_asr (target_ulong val)
102{
103 ppc_store_asr(env, val);
104}
105#endif
106
107void helper_store_sdr1 (target_ulong val)
108{
109 ppc_store_sdr1(env, val);
110}
111
112void helper_store_tbl (target_ulong val)
113{
114 cpu_ppc_store_tbl(env, val);
115}
116
117void helper_store_tbu (target_ulong val)
118{
119 cpu_ppc_store_tbu(env, val);
120}
121
122void helper_store_atbl (target_ulong val)
123{
124 cpu_ppc_store_atbl(env, val);
125}
126
127void helper_store_atbu (target_ulong val)
128{
129 cpu_ppc_store_atbu(env, val);
130}
131
132void helper_store_601_rtcl (target_ulong val)
133{
134 cpu_ppc601_store_rtcl(env, val);
135}
136
137void helper_store_601_rtcu (target_ulong val)
138{
139 cpu_ppc601_store_rtcu(env, val);
140}
141
142target_ulong helper_load_decr (void)
143{
144 return cpu_ppc_load_decr(env);
145}
146
147void helper_store_decr (target_ulong val)
148{
149 cpu_ppc_store_decr(env, val);
150}
151
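/* On the 601, HID0 bit 0x8 selects the byte order: the code below mirrors
 * that bit into hflags_nmsr (the non-MSR part of hflags) and then into
 * hflags itself, so translated code picks up the new endianness without an
 * MSR write. */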
152void helper_store_hid0_601 (target_ulong val)
153{
154 target_ulong hid0;
155
156 hid0 = env->spr[SPR_HID0];
157 if ((val ^ hid0) & 0x00000008) {
158 /* Change current endianness */
159 env->hflags &= ~(1 << MSR_LE);
160 env->hflags_nmsr &= ~(1 << MSR_LE);
161 env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
162 env->hflags |= env->hflags_nmsr;
93fcfe39 163 qemu_log("%s: set endianness to %c => " ADDRX "\n",
45d827d2 164 __func__, val & 0x8 ? 'l' : 'b', env->hflags);
a496775f 165 }
45d827d2 166 env->spr[SPR_HID0] = (uint32_t)val;
a496775f
JM
167}
168
45d827d2
AJ
169void helper_store_403_pbr (uint32_t num, target_ulong value)
170{
171 if (likely(env->pb[num] != value)) {
172 env->pb[num] = value;
173 /* Should be optimized */
174 tlb_flush(env, 1);
175 }
176}
177
178target_ulong helper_load_40x_pit (void)
179{
180 return load_40x_pit(env);
181}
182
183void helper_store_40x_pit (target_ulong val)
184{
185 store_40x_pit(env, val);
186}
187
188void helper_store_40x_dbcr0 (target_ulong val)
189{
190 store_40x_dbcr0(env, val);
191}
192
193void helper_store_40x_sler (target_ulong val)
194{
195 store_40x_sler(env, val);
196}
197
198void helper_store_booke_tcr (target_ulong val)
199{
200 store_booke_tcr(env, val);
201}
202
203void helper_store_booke_tsr (target_ulong val)
204{
205 store_booke_tsr(env, val);
206}
207
208void helper_store_ibatu (uint32_t nr, target_ulong val)
209{
210 ppc_store_ibatu(env, nr, val);
211}
212
213void helper_store_ibatl (uint32_t nr, target_ulong val)
214{
215 ppc_store_ibatl(env, nr, val);
216}
217
218void helper_store_dbatu (uint32_t nr, target_ulong val)
219{
220 ppc_store_dbatu(env, nr, val);
221}
222
223void helper_store_dbatl (uint32_t nr, target_ulong val)
224{
225 ppc_store_dbatl(env, nr, val);
226}
227
228void helper_store_601_batl (uint32_t nr, target_ulong val)
229{
230 ppc_store_ibatl_601(env, nr, val);
231}
232
233void helper_store_601_batu (uint32_t nr, target_ulong val)
234{
235 ppc_store_ibatu_601(env, nr, val);
236}
237#endif
238
ff4a62cd
AJ
239/*****************************************************************************/
240/* Memory load and stores */
241
76db3ba4 242static always_inline target_ulong addr_add(target_ulong addr, target_long arg)
ff4a62cd
AJ
243{
244#if defined(TARGET_PPC64)
76db3ba4
AJ
245 if (!msr_sf)
246 return (uint32_t)(addr + arg);
ff4a62cd
AJ
247 else
248#endif
76db3ba4 249 return addr + arg;
ff4a62cd
AJ
250}
251
252void helper_lmw (target_ulong addr, uint32_t reg)
253{
76db3ba4 254 for (; reg < 32; reg++) {
ff4a62cd 255 if (msr_le)
76db3ba4 256 env->gpr[reg] = bswap32(ldl(addr));
ff4a62cd 257 else
76db3ba4
AJ
258 env->gpr[reg] = ldl(addr);
259 addr = addr_add(addr, 4);
ff4a62cd
AJ
260 }
261}
262
263void helper_stmw (target_ulong addr, uint32_t reg)
264{
76db3ba4 265 for (; reg < 32; reg++) {
ff4a62cd 266 if (msr_le)
76db3ba4 267 stl(addr, bswap32((uint32_t)env->gpr[reg]));
ff4a62cd 268 else
76db3ba4
AJ
269 stl(addr, (uint32_t)env->gpr[reg]);
270 addr = addr_add(addr, 4);
ff4a62cd
AJ
271 }
272}
273
dfbc799d
AJ
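/* lswi/lswx backend: load nb bytes into successive GPRs starting at reg
 * (wrapping from r31 back to r0); a trailing partial word is left-justified
 * in its register and zero-padded on the right. */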
274void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
275{
276 int sh;
76db3ba4
AJ
277 for (; nb > 3; nb -= 4) {
278 env->gpr[reg] = ldl(addr);
dfbc799d 279 reg = (reg + 1) % 32;
76db3ba4 280 addr = addr_add(addr, 4);
dfbc799d
AJ
281 }
282 if (unlikely(nb > 0)) {
283 env->gpr[reg] = 0;
76db3ba4
AJ
284 for (sh = 24; nb > 0; nb--, sh -= 8) {
285 env->gpr[reg] |= ldub(addr) << sh;
286 addr = addr_add(addr, 1);
dfbc799d
AJ
287 }
288 }
289}
290/* PPC32 specification says we must generate an exception if
291 * rA is in the range of registers to be loaded.
 292 * On the other hand, IBM says this is valid, but rA won't be loaded.
293 * For now, I'll follow the spec...
294 */
295void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
296{
297 if (likely(xer_bc != 0)) {
298 if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
299 (reg < rb && (reg + xer_bc) > rb))) {
e06fcd75
AJ
300 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
301 POWERPC_EXCP_INVAL |
302 POWERPC_EXCP_INVAL_LSWX);
dfbc799d
AJ
303 } else {
304 helper_lsw(addr, xer_bc, reg);
305 }
306 }
307}
308
309void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
310{
311 int sh;
76db3ba4
AJ
312 for (; nb > 3; nb -= 4) {
313 stl(addr, env->gpr[reg]);
dfbc799d 314 reg = (reg + 1) % 32;
76db3ba4 315 addr = addr_add(addr, 4);
dfbc799d
AJ
316 }
317 if (unlikely(nb > 0)) {
a16b45e7 318 for (sh = 24; nb > 0; nb--, sh -= 8) {
76db3ba4 319 stb(addr, (env->gpr[reg] >> sh) & 0xFF);
a16b45e7
AJ
320 addr = addr_add(addr, 1);
321 }
dfbc799d
AJ
322 }
323}
324
799a8c8d
AJ
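/* dcbz: zero one data cache line (the address is aligned down to the line
 * size) one 32-bit word at a time; a pending load/store reservation on that
 * line base address is cancelled. */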
325static void do_dcbz(target_ulong addr, int dcache_line_size)
326{
76db3ba4 327 addr &= ~(dcache_line_size - 1);
799a8c8d 328 int i;
799a8c8d 329 for (i = 0 ; i < dcache_line_size ; i += 4) {
dcc532c8 330 stl(addr + i , 0);
799a8c8d 331 }
76db3ba4 332 if (env->reserve == addr)
799a8c8d
AJ
333 env->reserve = (target_ulong)-1ULL;
334}
335
336void helper_dcbz(target_ulong addr)
337{
338 do_dcbz(addr, env->dcache_line_size);
339}
340
341void helper_dcbz_970(target_ulong addr)
342{
343 if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
344 do_dcbz(addr, 32);
345 else
346 do_dcbz(addr, env->dcache_line_size);
347}
348
37d269df
AJ
349void helper_icbi(target_ulong addr)
350{
351 uint32_t tmp;
352
76db3ba4 353 addr &= ~(env->dcache_line_size - 1);
37d269df
AJ
354 /* Invalidate one cache line :
355 * PowerPC specification says this is to be treated like a load
356 * (not a fetch) by the MMU. To be sure it will be so,
357 * do the load "by hand".
358 */
dcc532c8 359 tmp = ldl(addr);
37d269df
AJ
360 tb_invalidate_page_range(addr, addr + env->icache_line_size);
361}
362
bdb4b689
AJ
363// XXX: to be tested
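/* lscbx (POWER): load up to XER[bc] bytes into successive GPRs, never
 * overwriting ra (if non-zero) or rb, and stop at a byte that matches
 * XER[cmp]; the value returned is the number of bytes scanned before the
 * matching byte (or XER[bc] when no byte matches). */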
364target_ulong helper_lscbx (target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
365{
366 int i, c, d;
bdb4b689
AJ
367 d = 24;
368 for (i = 0; i < xer_bc; i++) {
76db3ba4
AJ
369 c = ldub(addr);
370 addr = addr_add(addr, 1);
bdb4b689
AJ
371 /* ra (if not 0) and rb are never modified */
372 if (likely(reg != rb && (ra == 0 || reg != ra))) {
373 env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
374 }
375 if (unlikely(c == xer_cmp))
376 break;
377 if (likely(d != 0)) {
378 d -= 8;
379 } else {
380 d = 24;
381 reg++;
382 reg = reg & 0x1F;
383 }
384 }
385 return i;
386}
387
9a64fbe4 388/*****************************************************************************/
fdabc366 389/* Fixed point operations helpers */
d9bce9d9 390#if defined(TARGET_PPC64)
d9bce9d9 391
74637406
AJ
392/* multiply high word */
393uint64_t helper_mulhd (uint64_t arg1, uint64_t arg2)
fdabc366 394{
74637406 395 uint64_t tl, th;
fdabc366 396
74637406
AJ
397 muls64(&tl, &th, arg1, arg2);
398 return th;
d9bce9d9 399}
d9bce9d9 400
74637406
AJ
401/* multiply high word unsigned */
402uint64_t helper_mulhdu (uint64_t arg1, uint64_t arg2)
fdabc366 403{
74637406 404 uint64_t tl, th;
fdabc366 405
74637406
AJ
406 mulu64(&tl, &th, arg1, arg2);
407 return th;
fdabc366
FB
408}
409
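/* mulldo: 64x64->64 multiply with OV detection; as the comment inside
 * notes, the code treats the product as overflowing whenever the high half
 * of the 128-bit result is neither 0 nor -1. */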
74637406 410uint64_t helper_mulldo (uint64_t arg1, uint64_t arg2)
fdabc366 411{
d9bce9d9
JM
412 int64_t th;
413 uint64_t tl;
414
74637406 415 muls64(&tl, (uint64_t *)&th, arg1, arg2);
88ad920b 416 /* If th != 0 && th != -1, then we had an overflow */
6f2d8978 417 if (likely((uint64_t)(th + 1) <= 1)) {
3d7b417e 418 env->xer &= ~(1 << XER_OV);
fdabc366 419 } else {
3d7b417e 420 env->xer |= (1 << XER_OV) | (1 << XER_SO);
fdabc366 421 }
74637406 422 return (int64_t)tl;
d9bce9d9
JM
423}
424#endif
425
26d67362 426target_ulong helper_cntlzw (target_ulong t)
603fccce 427{
26d67362 428 return clz32(t);
603fccce
JM
429}
430
431#if defined(TARGET_PPC64)
26d67362 432target_ulong helper_cntlzd (target_ulong t)
603fccce 433{
26d67362 434 return clz64(t);
603fccce
JM
435}
436#endif
437
9a64fbe4 438/* shift right arithmetic helper */
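/* XER[CA] is set only when the shifted value is negative and non-zero bits
 * are shifted out (i.e. the result differs from an exact division by 2^n);
 * for shift amounts of 32 or more the result is just the sign word and CA
 * follows the sign. */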
26d67362 439target_ulong helper_sraw (target_ulong value, target_ulong shift)
9a64fbe4
FB
440{
441 int32_t ret;
442
26d67362
AJ
443 if (likely(!(shift & 0x20))) {
444 if (likely((uint32_t)shift != 0)) {
445 shift &= 0x1f;
446 ret = (int32_t)value >> shift;
447 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
3d7b417e 448 env->xer &= ~(1 << XER_CA);
fdabc366 449 } else {
3d7b417e 450 env->xer |= (1 << XER_CA);
fdabc366
FB
451 }
452 } else {
26d67362 453 ret = (int32_t)value;
3d7b417e 454 env->xer &= ~(1 << XER_CA);
fdabc366
FB
455 }
456 } else {
26d67362
AJ
457 ret = (int32_t)value >> 31;
458 if (ret) {
3d7b417e 459 env->xer |= (1 << XER_CA);
26d67362
AJ
460 } else {
461 env->xer &= ~(1 << XER_CA);
76a66253 462 }
fdabc366 463 }
26d67362 464 return (target_long)ret;
9a64fbe4
FB
465}
466
d9bce9d9 467#if defined(TARGET_PPC64)
26d67362 468target_ulong helper_srad (target_ulong value, target_ulong shift)
d9bce9d9
JM
469{
470 int64_t ret;
471
26d67362
AJ
472 if (likely(!(shift & 0x40))) {
473 if (likely((uint64_t)shift != 0)) {
474 shift &= 0x3f;
475 ret = (int64_t)value >> shift;
476 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
3d7b417e 477 env->xer &= ~(1 << XER_CA);
d9bce9d9 478 } else {
3d7b417e 479 env->xer |= (1 << XER_CA);
d9bce9d9
JM
480 }
481 } else {
26d67362 482 ret = (int64_t)value;
3d7b417e 483 env->xer &= ~(1 << XER_CA);
d9bce9d9
JM
484 }
485 } else {
26d67362
AJ
486 ret = (int64_t)value >> 63;
487 if (ret) {
3d7b417e 488 env->xer |= (1 << XER_CA);
26d67362
AJ
489 } else {
490 env->xer &= ~(1 << XER_CA);
d9bce9d9
JM
491 }
492 }
26d67362 493 return ret;
d9bce9d9
JM
494}
495#endif
496
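/* popcntb: count the set bits within each byte independently. The three
 * masked add steps below accumulate 1-, 2- and then 4-bit partial sums;
 * no carry ever crosses a byte boundary, so each byte ends up holding its
 * own population count (0..8). */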
26d67362 497target_ulong helper_popcntb (target_ulong val)
d9bce9d9 498{
6176a26d
AJ
499 val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
500 val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
501 val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
502 return val;
d9bce9d9
JM
503}
504
505#if defined(TARGET_PPC64)
26d67362 506target_ulong helper_popcntb_64 (target_ulong val)
d9bce9d9 507{
6176a26d
AJ
508 val = (val & 0x5555555555555555ULL) + ((val >> 1) & 0x5555555555555555ULL);
509 val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL);
510 val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fULL);
511 return val;
d9bce9d9
JM
512}
513#endif
514
fdabc366 515/*****************************************************************************/
9a64fbe4 516/* Floating point operations helpers */
a0d7d5a7
AJ
517uint64_t helper_float32_to_float64(uint32_t arg)
518{
519 CPU_FloatU f;
520 CPU_DoubleU d;
521 f.l = arg;
522 d.d = float32_to_float64(f.f, &env->fp_status);
523 return d.ll;
524}
525
526uint32_t helper_float64_to_float32(uint64_t arg)
527{
528 CPU_FloatU f;
529 CPU_DoubleU d;
530 d.ll = arg;
531 f.f = float64_to_float32(d.d, &env->fp_status);
532 return f.l;
533}
534
0ca9d380 535static always_inline int isden (float64 d)
7c58044c 536{
0ca9d380 537 CPU_DoubleU u;
7c58044c 538
0ca9d380 539 u.d = d;
7c58044c 540
0ca9d380 541 return ((u.ll >> 52) & 0x7FF) == 0;
7c58044c
JM
542}
543
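/* Compute the 5-bit FPRF classification of a double. The encoding produced
 * below is the PowerPC one: 0x11 quiet NaN, 0x09/0x05 -/+ infinity,
 * 0x12/0x02 -/+ zero, 0x18/0x14 -/+ denormal, 0x08/0x04 -/+ normal. Only the
 * low four bits (FPCC) are returned, so the caller can set CR1 for Rc=1
 * forms. */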
af12906f 544uint32_t helper_compute_fprf (uint64_t arg, uint32_t set_fprf)
7c58044c 545{
af12906f 546 CPU_DoubleU farg;
7c58044c 547 int isneg;
af12906f
AJ
548 int ret;
549 farg.ll = arg;
f23c346e 550 isneg = float64_is_neg(farg.d);
af12906f
AJ
551 if (unlikely(float64_is_nan(farg.d))) {
552 if (float64_is_signaling_nan(farg.d)) {
7c58044c 553 /* Signaling NaN: flags are undefined */
af12906f 554 ret = 0x00;
7c58044c
JM
555 } else {
556 /* Quiet NaN */
af12906f 557 ret = 0x11;
7c58044c 558 }
f23c346e 559 } else if (unlikely(float64_is_infinity(farg.d))) {
7c58044c
JM
560 /* +/- infinity */
561 if (isneg)
af12906f 562 ret = 0x09;
7c58044c 563 else
af12906f 564 ret = 0x05;
7c58044c 565 } else {
f23c346e 566 if (float64_is_zero(farg.d)) {
7c58044c
JM
567 /* +/- zero */
568 if (isneg)
af12906f 569 ret = 0x12;
7c58044c 570 else
af12906f 571 ret = 0x02;
7c58044c 572 } else {
af12906f 573 if (isden(farg.d)) {
7c58044c 574 /* Denormalized numbers */
af12906f 575 ret = 0x10;
7c58044c
JM
576 } else {
577 /* Normalized numbers */
af12906f 578 ret = 0x00;
7c58044c
JM
579 }
580 if (isneg) {
af12906f 581 ret |= 0x08;
7c58044c 582 } else {
af12906f 583 ret |= 0x04;
7c58044c
JM
584 }
585 }
586 }
587 if (set_fprf) {
588 /* We update FPSCR_FPRF */
589 env->fpscr &= ~(0x1F << FPSCR_FPRF);
af12906f 590 env->fpscr |= ret << FPSCR_FPRF;
7c58044c
JM
591 }
592 /* We just need fpcc to update Rc1 */
af12906f 593 return ret & 0xF;
7c58044c
JM
594}
595
596/* Floating-point invalid operations exception */
af12906f 597static always_inline uint64_t fload_invalid_op_excp (int op)
7c58044c 598{
af12906f 599 uint64_t ret = 0;
7c58044c
JM
600 int ve;
601
602 ve = fpscr_ve;
e0147e41
AJ
603 switch (op) {
604 case POWERPC_EXCP_FP_VXSNAN:
7c58044c 605 env->fpscr |= 1 << FPSCR_VXSNAN;
e0147e41
AJ
606 break;
607 case POWERPC_EXCP_FP_VXSOFT:
7c58044c 608 env->fpscr |= 1 << FPSCR_VXSOFT;
e0147e41 609 break;
7c58044c
JM
610 case POWERPC_EXCP_FP_VXISI:
611 /* Magnitude subtraction of infinities */
612 env->fpscr |= 1 << FPSCR_VXISI;
613 goto update_arith;
614 case POWERPC_EXCP_FP_VXIDI:
615 /* Division of infinity by infinity */
616 env->fpscr |= 1 << FPSCR_VXIDI;
617 goto update_arith;
618 case POWERPC_EXCP_FP_VXZDZ:
619 /* Division of zero by zero */
620 env->fpscr |= 1 << FPSCR_VXZDZ;
621 goto update_arith;
622 case POWERPC_EXCP_FP_VXIMZ:
623 /* Multiplication of zero by infinity */
624 env->fpscr |= 1 << FPSCR_VXIMZ;
625 goto update_arith;
626 case POWERPC_EXCP_FP_VXVC:
627 /* Ordered comparison of NaN */
628 env->fpscr |= 1 << FPSCR_VXVC;
629 env->fpscr &= ~(0xF << FPSCR_FPCC);
630 env->fpscr |= 0x11 << FPSCR_FPCC;
631 /* We must update the target FPR before raising the exception */
632 if (ve != 0) {
633 env->exception_index = POWERPC_EXCP_PROGRAM;
634 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
635 /* Update the floating-point enabled exception summary */
636 env->fpscr |= 1 << FPSCR_FEX;
 637 /* Exception is deferred */
638 ve = 0;
639 }
640 break;
641 case POWERPC_EXCP_FP_VXSQRT:
642 /* Square root of a negative number */
643 env->fpscr |= 1 << FPSCR_VXSQRT;
644 update_arith:
645 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
646 if (ve == 0) {
647 /* Set the result to quiet NaN */
e0147e41 648 ret = 0xFFF8000000000000ULL;
7c58044c
JM
649 env->fpscr &= ~(0xF << FPSCR_FPCC);
650 env->fpscr |= 0x11 << FPSCR_FPCC;
651 }
652 break;
653 case POWERPC_EXCP_FP_VXCVI:
654 /* Invalid conversion */
655 env->fpscr |= 1 << FPSCR_VXCVI;
656 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
657 if (ve == 0) {
658 /* Set the result to quiet NaN */
e0147e41 659 ret = 0xFFF8000000000000ULL;
7c58044c
JM
660 env->fpscr &= ~(0xF << FPSCR_FPCC);
661 env->fpscr |= 0x11 << FPSCR_FPCC;
662 }
663 break;
664 }
665 /* Update the floating-point invalid operation summary */
666 env->fpscr |= 1 << FPSCR_VX;
667 /* Update the floating-point exception summary */
668 env->fpscr |= 1 << FPSCR_FX;
669 if (ve != 0) {
670 /* Update the floating-point enabled exception summary */
671 env->fpscr |= 1 << FPSCR_FEX;
672 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 673 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
7c58044c 674 }
af12906f 675 return ret;
7c58044c
JM
676}
677
e33e94f9 678static always_inline void float_zero_divide_excp (void)
7c58044c 679{
7c58044c
JM
680 env->fpscr |= 1 << FPSCR_ZX;
681 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
682 /* Update the floating-point exception summary */
683 env->fpscr |= 1 << FPSCR_FX;
684 if (fpscr_ze != 0) {
685 /* Update the floating-point enabled exception summary */
686 env->fpscr |= 1 << FPSCR_FEX;
687 if (msr_fe0 != 0 || msr_fe1 != 0) {
e06fcd75
AJ
688 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
689 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
7c58044c 690 }
7c58044c
JM
691 }
692}
693
694static always_inline void float_overflow_excp (void)
695{
696 env->fpscr |= 1 << FPSCR_OX;
697 /* Update the floating-point exception summary */
698 env->fpscr |= 1 << FPSCR_FX;
699 if (fpscr_oe != 0) {
700 /* XXX: should adjust the result */
701 /* Update the floating-point enabled exception summary */
702 env->fpscr |= 1 << FPSCR_FEX;
703 /* We must update the target FPR before raising the exception */
704 env->exception_index = POWERPC_EXCP_PROGRAM;
705 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
706 } else {
707 env->fpscr |= 1 << FPSCR_XX;
708 env->fpscr |= 1 << FPSCR_FI;
709 }
710}
711
712static always_inline void float_underflow_excp (void)
713{
714 env->fpscr |= 1 << FPSCR_UX;
715 /* Update the floating-point exception summary */
716 env->fpscr |= 1 << FPSCR_FX;
717 if (fpscr_ue != 0) {
718 /* XXX: should adjust the result */
719 /* Update the floating-point enabled exception summary */
720 env->fpscr |= 1 << FPSCR_FEX;
721 /* We must update the target FPR before raising the exception */
722 env->exception_index = POWERPC_EXCP_PROGRAM;
723 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
724 }
725}
726
727static always_inline void float_inexact_excp (void)
728{
729 env->fpscr |= 1 << FPSCR_XX;
730 /* Update the floating-point exception summary */
731 env->fpscr |= 1 << FPSCR_FX;
732 if (fpscr_xe != 0) {
733 /* Update the floating-point enabled exception summary */
734 env->fpscr |= 1 << FPSCR_FEX;
735 /* We must update the target FPR before raising the exception */
736 env->exception_index = POWERPC_EXCP_PROGRAM;
737 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
738 }
739}
740
741static always_inline void fpscr_set_rounding_mode (void)
742{
743 int rnd_type;
744
745 /* Set rounding mode */
746 switch (fpscr_rn) {
747 case 0:
748 /* Best approximation (round to nearest) */
749 rnd_type = float_round_nearest_even;
750 break;
751 case 1:
752 /* Smaller magnitude (round toward zero) */
753 rnd_type = float_round_to_zero;
754 break;
755 case 2:
 756 /* Round toward +infinity */
757 rnd_type = float_round_up;
758 break;
759 default:
760 case 3:
 761 /* Round toward -infinity */
762 rnd_type = float_round_down;
763 break;
764 }
765 set_float_rounding_mode(rnd_type, &env->fp_status);
766}
767
6e35d524
AJ
768void helper_fpscr_clrbit (uint32_t bit)
769{
770 int prev;
771
772 prev = (env->fpscr >> bit) & 1;
773 env->fpscr &= ~(1 << bit);
774 if (prev == 1) {
775 switch (bit) {
776 case FPSCR_RN1:
777 case FPSCR_RN:
778 fpscr_set_rounding_mode();
779 break;
780 default:
781 break;
782 }
783 }
784}
785
af12906f 786void helper_fpscr_setbit (uint32_t bit)
7c58044c
JM
787{
788 int prev;
789
790 prev = (env->fpscr >> bit) & 1;
791 env->fpscr |= 1 << bit;
792 if (prev == 0) {
793 switch (bit) {
794 case FPSCR_VX:
795 env->fpscr |= 1 << FPSCR_FX;
796 if (fpscr_ve)
797 goto raise_ve;
798 case FPSCR_OX:
799 env->fpscr |= 1 << FPSCR_FX;
800 if (fpscr_oe)
801 goto raise_oe;
802 break;
803 case FPSCR_UX:
804 env->fpscr |= 1 << FPSCR_FX;
805 if (fpscr_ue)
806 goto raise_ue;
807 break;
808 case FPSCR_ZX:
809 env->fpscr |= 1 << FPSCR_FX;
810 if (fpscr_ze)
811 goto raise_ze;
812 break;
813 case FPSCR_XX:
814 env->fpscr |= 1 << FPSCR_FX;
815 if (fpscr_xe)
816 goto raise_xe;
817 break;
818 case FPSCR_VXSNAN:
819 case FPSCR_VXISI:
820 case FPSCR_VXIDI:
821 case FPSCR_VXZDZ:
822 case FPSCR_VXIMZ:
823 case FPSCR_VXVC:
824 case FPSCR_VXSOFT:
825 case FPSCR_VXSQRT:
826 case FPSCR_VXCVI:
827 env->fpscr |= 1 << FPSCR_VX;
828 env->fpscr |= 1 << FPSCR_FX;
829 if (fpscr_ve != 0)
830 goto raise_ve;
831 break;
832 case FPSCR_VE:
833 if (fpscr_vx != 0) {
834 raise_ve:
835 env->error_code = POWERPC_EXCP_FP;
836 if (fpscr_vxsnan)
837 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
838 if (fpscr_vxisi)
839 env->error_code |= POWERPC_EXCP_FP_VXISI;
840 if (fpscr_vxidi)
841 env->error_code |= POWERPC_EXCP_FP_VXIDI;
842 if (fpscr_vxzdz)
843 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
844 if (fpscr_vximz)
845 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
846 if (fpscr_vxvc)
847 env->error_code |= POWERPC_EXCP_FP_VXVC;
848 if (fpscr_vxsoft)
849 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
850 if (fpscr_vxsqrt)
851 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
852 if (fpscr_vxcvi)
853 env->error_code |= POWERPC_EXCP_FP_VXCVI;
854 goto raise_excp;
855 }
856 break;
857 case FPSCR_OE:
858 if (fpscr_ox != 0) {
859 raise_oe:
860 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
861 goto raise_excp;
862 }
863 break;
864 case FPSCR_UE:
865 if (fpscr_ux != 0) {
866 raise_ue:
867 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
868 goto raise_excp;
869 }
870 break;
871 case FPSCR_ZE:
872 if (fpscr_zx != 0) {
873 raise_ze:
874 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
875 goto raise_excp;
876 }
877 break;
878 case FPSCR_XE:
879 if (fpscr_xx != 0) {
880 raise_xe:
881 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
882 goto raise_excp;
883 }
884 break;
885 case FPSCR_RN1:
886 case FPSCR_RN:
887 fpscr_set_rounding_mode();
888 break;
889 default:
890 break;
891 raise_excp:
892 /* Update the floating-point enabled exception summary */
893 env->fpscr |= 1 << FPSCR_FEX;
894 /* We have to update Rc1 before raising the exception */
895 env->exception_index = POWERPC_EXCP_PROGRAM;
896 break;
897 }
898 }
899}
900
af12906f 901void helper_store_fpscr (uint64_t arg, uint32_t mask)
7c58044c
JM
902{
903 /*
 904 * We use only the 32 LSBs of the incoming fpr
905 */
7c58044c
JM
906 uint32_t prev, new;
907 int i;
908
7c58044c 909 prev = env->fpscr;
af12906f 910 new = (uint32_t)arg;
27ee5df0
AJ
911 new &= ~0x60000000;
912 new |= prev & 0x60000000;
913 for (i = 0; i < 8; i++) {
7c58044c
JM
914 if (mask & (1 << i)) {
915 env->fpscr &= ~(0xF << (4 * i));
916 env->fpscr |= new & (0xF << (4 * i));
917 }
918 }
919 /* Update VX and FEX */
920 if (fpscr_ix != 0)
921 env->fpscr |= 1 << FPSCR_VX;
5567025f
AJ
922 else
923 env->fpscr &= ~(1 << FPSCR_VX);
7c58044c
JM
924 if ((fpscr_ex & fpscr_eex) != 0) {
925 env->fpscr |= 1 << FPSCR_FEX;
926 env->exception_index = POWERPC_EXCP_PROGRAM;
927 /* XXX: we should compute it properly */
928 env->error_code = POWERPC_EXCP_FP;
929 }
5567025f
AJ
930 else
931 env->fpscr &= ~(1 << FPSCR_FEX);
7c58044c
JM
932 fpscr_set_rounding_mode();
933}
7c58044c 934
af12906f 935void helper_float_check_status (void)
7c58044c 936{
af12906f 937#ifdef CONFIG_SOFTFLOAT
7c58044c
JM
938 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
939 (env->error_code & POWERPC_EXCP_FP)) {
 940 /* Deferred floating-point exception after target FPR update */
941 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 942 helper_raise_exception_err(env->exception_index, env->error_code);
be94c952
AJ
943 } else {
944 int status = get_float_exception_flags(&env->fp_status);
e33e94f9
AJ
945 if (status & float_flag_divbyzero) {
946 float_zero_divide_excp();
947 } else if (status & float_flag_overflow) {
be94c952
AJ
948 float_overflow_excp();
949 } else if (status & float_flag_underflow) {
950 float_underflow_excp();
951 } else if (status & float_flag_inexact) {
952 float_inexact_excp();
953 }
7c58044c 954 }
af12906f
AJ
955#else
956 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
957 (env->error_code & POWERPC_EXCP_FP)) {
 958 /* Deferred floating-point exception after target FPR update */
959 if (msr_fe0 != 0 || msr_fe1 != 0)
e06fcd75 960 helper_raise_exception_err(env->exception_index, env->error_code);
af12906f 961 }
af12906f
AJ
962#endif
963}
964
965#ifdef CONFIG_SOFTFLOAT
966void helper_reset_fpstatus (void)
967{
be94c952 968 set_float_exception_flags(0, &env->fp_status);
7c58044c
JM
969}
970#endif
971
af12906f
AJ
972/* fadd - fadd. */
973uint64_t helper_fadd (uint64_t arg1, uint64_t arg2)
7c58044c 974{
af12906f
AJ
975 CPU_DoubleU farg1, farg2;
976
977 farg1.ll = arg1;
978 farg2.ll = arg2;
979#if USE_PRECISE_EMULATION
980 if (unlikely(float64_is_signaling_nan(farg1.d) ||
981 float64_is_signaling_nan(farg2.d))) {
7c58044c 982 /* sNaN addition */
af12906f 983 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
17218d1f
AJ
984 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
985 float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
7c58044c 986 /* Magnitude subtraction of infinities */
cf1cf21e 987 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
17218d1f
AJ
988 } else {
989 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
7c58044c 990 }
af12906f
AJ
991#else
992 farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
993#endif
994 return farg1.ll;
7c58044c
JM
995}
996
af12906f
AJ
997/* fsub - fsub. */
998uint64_t helper_fsub (uint64_t arg1, uint64_t arg2)
999{
1000 CPU_DoubleU farg1, farg2;
1001
1002 farg1.ll = arg1;
1003 farg2.ll = arg2;
1004#if USE_PRECISE_EMULATION
7c58044c 1005{
af12906f
AJ
1006 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1007 float64_is_signaling_nan(farg2.d))) {
7c58044c 1008 /* sNaN subtraction */
af12906f 1009 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
17218d1f
AJ
1010 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
1011 float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
7c58044c 1012 /* Magnitude subtraction of infinities */
af12906f 1013 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
17218d1f
AJ
1014 } else {
1015 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
7c58044c
JM
1016 }
1017}
af12906f
AJ
1018#else
1019 farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
1020#endif
1021 return farg1.ll;
1022}
7c58044c 1023
af12906f
AJ
1024/* fmul - fmul. */
1025uint64_t helper_fmul (uint64_t arg1, uint64_t arg2)
7c58044c 1026{
af12906f
AJ
1027 CPU_DoubleU farg1, farg2;
1028
1029 farg1.ll = arg1;
1030 farg2.ll = arg2;
1031#if USE_PRECISE_EMULATION
1032 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1033 float64_is_signaling_nan(farg2.d))) {
7c58044c 1034 /* sNaN multiplication */
af12906f 1035 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e
AJ
1036 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1037 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
7c58044c 1038 /* Multiplication of zero by infinity */
af12906f 1039 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1040 } else {
af12906f 1041 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
7c58044c 1042 }
af12906f
AJ
1043#else
1044 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1045#endif
1046 return farg1.ll;
1047}
7c58044c 1048
af12906f
AJ
1049/* fdiv - fdiv. */
1050uint64_t helper_fdiv (uint64_t arg1, uint64_t arg2)
7c58044c 1051{
af12906f
AJ
1052 CPU_DoubleU farg1, farg2;
1053
1054 farg1.ll = arg1;
1055 farg2.ll = arg2;
1056#if USE_PRECISE_EMULATION
1057 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1058 float64_is_signaling_nan(farg2.d))) {
7c58044c 1059 /* sNaN division */
af12906f 1060 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1061 } else if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d))) {
7c58044c 1062 /* Division of infinity by infinity */
af12906f 1063 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
e33e94f9
AJ
1064 } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
1065 /* Division of zero by zero */
1066 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
7c58044c 1067 } else {
af12906f 1068 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
7c58044c 1069 }
af12906f
AJ
1070#else
1071 farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
1072#endif
1073 return farg1.ll;
7c58044c 1074}
7c58044c 1075
af12906f
AJ
1076/* fabs */
1077uint64_t helper_fabs (uint64_t arg)
9a64fbe4 1078{
af12906f 1079 CPU_DoubleU farg;
9a64fbe4 1080
af12906f
AJ
1081 farg.ll = arg;
1082 farg.d = float64_abs(farg.d);
1083 return farg.ll;
1084}
1085
1086/* fnabs */
1087uint64_t helper_fnabs (uint64_t arg)
1088{
1089 CPU_DoubleU farg;
1090
1091 farg.ll = arg;
1092 farg.d = float64_abs(farg.d);
1093 farg.d = float64_chs(farg.d);
1094 return farg.ll;
1095}
1096
1097/* fneg */
1098uint64_t helper_fneg (uint64_t arg)
1099{
1100 CPU_DoubleU farg;
1101
1102 farg.ll = arg;
1103 farg.d = float64_chs(farg.d);
1104 return farg.ll;
1105}
1106
1107/* fctiw - fctiw. */
1108uint64_t helper_fctiw (uint64_t arg)
1109{
1110 CPU_DoubleU farg;
1111 farg.ll = arg;
1112
1113 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1114 /* sNaN conversion */
af12906f 1115 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1116 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
7c58044c 1117 /* qNaN / infinity conversion */
af12906f 1118 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1119 } else {
af12906f 1120 farg.ll = float64_to_int32(farg.d, &env->fp_status);
1cdb9c3d 1121#if USE_PRECISE_EMULATION
7c58044c
JM
1122 /* XXX: higher bits are not supposed to be significant.
1123 * to make tests easier, return the same as a real PowerPC 750
1124 */
af12906f 1125 farg.ll |= 0xFFF80000ULL << 32;
e864cabd 1126#endif
7c58044c 1127 }
af12906f 1128 return farg.ll;
9a64fbe4
FB
1129}
1130
af12906f
AJ
1131/* fctiwz - fctiwz. */
1132uint64_t helper_fctiwz (uint64_t arg)
9a64fbe4 1133{
af12906f
AJ
1134 CPU_DoubleU farg;
1135 farg.ll = arg;
4ecc3190 1136
af12906f 1137 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1138 /* sNaN conversion */
af12906f 1139 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1140 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
7c58044c 1141 /* qNaN / infinity conversion */
af12906f 1142 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1143 } else {
af12906f 1144 farg.ll = float64_to_int32_round_to_zero(farg.d, &env->fp_status);
1cdb9c3d 1145#if USE_PRECISE_EMULATION
7c58044c
JM
1146 /* XXX: higher bits are not supposed to be significant.
1147 * to make tests easier, return the same as a real PowerPC 750
1148 */
af12906f 1149 farg.ll |= 0xFFF80000ULL << 32;
e864cabd 1150#endif
7c58044c 1151 }
af12906f 1152 return farg.ll;
9a64fbe4
FB
1153}
1154
426613db 1155#if defined(TARGET_PPC64)
af12906f
AJ
1156/* fcfid - fcfid. */
1157uint64_t helper_fcfid (uint64_t arg)
426613db 1158{
af12906f
AJ
1159 CPU_DoubleU farg;
1160 farg.d = int64_to_float64(arg, &env->fp_status);
1161 return farg.ll;
426613db
JM
1162}
1163
af12906f
AJ
1164/* fctid - fctid. */
1165uint64_t helper_fctid (uint64_t arg)
426613db 1166{
af12906f
AJ
1167 CPU_DoubleU farg;
1168 farg.ll = arg;
426613db 1169
af12906f 1170 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1171 /* sNaN conversion */
af12906f 1172 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1173 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
7c58044c 1174 /* qNaN / infinity conversion */
af12906f 1175 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1176 } else {
af12906f 1177 farg.ll = float64_to_int64(farg.d, &env->fp_status);
7c58044c 1178 }
af12906f 1179 return farg.ll;
426613db
JM
1180}
1181
af12906f
AJ
1182/* fctidz - fctidz. */
1183uint64_t helper_fctidz (uint64_t arg)
426613db 1184{
af12906f
AJ
1185 CPU_DoubleU farg;
1186 farg.ll = arg;
426613db 1187
af12906f 1188 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1189 /* sNaN conversion */
af12906f 1190 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1191 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
7c58044c 1192 /* qNaN / infinity conversion */
af12906f 1193 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c 1194 } else {
af12906f 1195 farg.ll = float64_to_int64_round_to_zero(farg.d, &env->fp_status);
7c58044c 1196 }
af12906f 1197 return farg.ll;
426613db
JM
1198}
1199
1200#endif
1201
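/* Common helper for frin/friz/frip/frim: temporarily force the requested
 * rounding mode, round the operand to an integral double, then restore the
 * rounding mode selected by FPSCR[RN]. */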
af12906f 1202static always_inline uint64_t do_fri (uint64_t arg, int rounding_mode)
d7e4b87e 1203{
af12906f
AJ
1204 CPU_DoubleU farg;
1205 farg.ll = arg;
1206
1207 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1208 /* sNaN round */
af12906f 1209 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
f23c346e 1210 } else if (unlikely(float64_is_nan(farg.d) || float64_is_infinity(farg.d))) {
7c58044c 1211 /* qNaN / infinity round */
af12906f 1212 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
7c58044c
JM
1213 } else {
1214 set_float_rounding_mode(rounding_mode, &env->fp_status);
af12906f 1215 farg.ll = float64_round_to_int(farg.d, &env->fp_status);
7c58044c
JM
1216 /* Restore rounding mode from FPSCR */
1217 fpscr_set_rounding_mode();
1218 }
af12906f 1219 return farg.ll;
d7e4b87e
JM
1220}
1221
af12906f 1222uint64_t helper_frin (uint64_t arg)
d7e4b87e 1223{
af12906f 1224 return do_fri(arg, float_round_nearest_even);
d7e4b87e
JM
1225}
1226
af12906f 1227uint64_t helper_friz (uint64_t arg)
d7e4b87e 1228{
af12906f 1229 return do_fri(arg, float_round_to_zero);
d7e4b87e
JM
1230}
1231
af12906f 1232uint64_t helper_frip (uint64_t arg)
d7e4b87e 1233{
af12906f 1234 return do_fri(arg, float_round_up);
d7e4b87e
JM
1235}
1236
af12906f 1237uint64_t helper_frim (uint64_t arg)
d7e4b87e 1238{
af12906f 1239 return do_fri(arg, float_round_down);
d7e4b87e
JM
1240}
1241
af12906f
AJ
1242/* fmadd - fmadd. */
1243uint64_t helper_fmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
e864cabd 1244{
af12906f
AJ
1245 CPU_DoubleU farg1, farg2, farg3;
1246
1247 farg1.ll = arg1;
1248 farg2.ll = arg2;
1249 farg3.ll = arg3;
1250#if USE_PRECISE_EMULATION
1251 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1252 float64_is_signaling_nan(farg2.d) ||
1253 float64_is_signaling_nan(farg3.d))) {
7c58044c 1254 /* sNaN operation */
af12906f 1255 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1256 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1257 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1258 /* Multiplication of zero by infinity */
1259 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1260 } else {
e864cabd 1261#ifdef FLOAT128
7c58044c
JM
1262 /* This is the way the PowerPC specification defines it */
1263 float128 ft0_128, ft1_128;
1264
af12906f
AJ
1265 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1266 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1267 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1268 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1269 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1270 /* Magnitude subtraction of infinities */
1271 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1272 } else {
1273 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1274 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1275 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1276 }
e864cabd 1277#else
7c58044c 1278 /* This is OK on x86 hosts */
af12906f 1279 farg1.d = (farg1.d * farg2.d) + farg3.d;
e864cabd 1280#endif
7c58044c 1281 }
af12906f
AJ
1282#else
1283 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1284 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
1285#endif
1286 return farg1.ll;
e864cabd
JM
1287}
1288
af12906f
AJ
1289/* fmsub - fmsub. */
1290uint64_t helper_fmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
e864cabd 1291{
af12906f
AJ
1292 CPU_DoubleU farg1, farg2, farg3;
1293
1294 farg1.ll = arg1;
1295 farg2.ll = arg2;
1296 farg3.ll = arg3;
1297#if USE_PRECISE_EMULATION
1298 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1299 float64_is_signaling_nan(farg2.d) ||
1300 float64_is_signaling_nan(farg3.d))) {
7c58044c 1301 /* sNaN operation */
af12906f 1302 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1303 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1304 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1305 /* Multiplication of zero by infinity */
1306 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1307 } else {
e864cabd 1308#ifdef FLOAT128
7c58044c
JM
1309 /* This is the way the PowerPC specification defines it */
1310 float128 ft0_128, ft1_128;
1311
af12906f
AJ
1312 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1313 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1314 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1315 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1316 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1317 /* Magnitude subtraction of infinities */
1318 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1319 } else {
1320 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1321 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1322 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1323 }
e864cabd 1324#else
7c58044c 1325 /* This is OK on x86 hosts */
af12906f 1326 farg1.d = (farg1.d * farg2.d) - farg3.d;
e864cabd 1327#endif
7c58044c 1328 }
af12906f
AJ
1329#else
1330 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1331 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
1332#endif
1333 return farg1.ll;
e864cabd 1334}
e864cabd 1335
af12906f
AJ
1336/* fnmadd - fnmadd. */
1337uint64_t helper_fnmadd (uint64_t arg1, uint64_t arg2, uint64_t arg3)
4b3686fa 1338{
af12906f
AJ
1339 CPU_DoubleU farg1, farg2, farg3;
1340
1341 farg1.ll = arg1;
1342 farg2.ll = arg2;
1343 farg3.ll = arg3;
1344
1345 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1346 float64_is_signaling_nan(farg2.d) ||
1347 float64_is_signaling_nan(farg3.d))) {
7c58044c 1348 /* sNaN operation */
af12906f 1349 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1350 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1351 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1352 /* Multiplication of zero by infinity */
1353 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1354 } else {
1cdb9c3d 1355#if USE_PRECISE_EMULATION
e864cabd 1356#ifdef FLOAT128
7c58044c
JM
1357 /* This is the way the PowerPC specification defines it */
1358 float128 ft0_128, ft1_128;
1359
af12906f
AJ
1360 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1361 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1362 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1363 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1364 float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
1365 /* Magnitude subtraction of infinities */
1366 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1367 } else {
1368 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1369 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1370 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1371 }
e864cabd 1372#else
7c58044c 1373 /* This is OK on x86 hosts */
af12906f 1374 farg1.d = (farg1.d * farg2.d) + farg3.d;
e864cabd
JM
1375#endif
1376#else
af12906f
AJ
1377 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1378 farg1.d = float64_add(farg1.d, farg3.d, &env->fp_status);
e864cabd 1379#endif
a44d2ce1 1380 if (likely(!float64_is_nan(farg1.d)))
af12906f 1381 farg1.d = float64_chs(farg1.d);
7c58044c 1382 }
af12906f 1383 return farg1.ll;
4b3686fa
FB
1384}
1385
af12906f
AJ
1386/* fnmsub - fnmsub. */
1387uint64_t helper_fnmsub (uint64_t arg1, uint64_t arg2, uint64_t arg3)
4b3686fa 1388{
af12906f
AJ
1389 CPU_DoubleU farg1, farg2, farg3;
1390
1391 farg1.ll = arg1;
1392 farg2.ll = arg2;
1393 farg3.ll = arg3;
1394
1395 if (unlikely(float64_is_signaling_nan(farg1.d) ||
1396 float64_is_signaling_nan(farg2.d) ||
1397 float64_is_signaling_nan(farg3.d))) {
7c58044c 1398 /* sNaN operation */
af12906f 1399 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
da1e7ac9
AJ
1400 } else if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
1401 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
1402 /* Multiplication of zero by infinity */
1403 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
7c58044c 1404 } else {
1cdb9c3d 1405#if USE_PRECISE_EMULATION
e864cabd 1406#ifdef FLOAT128
7c58044c
JM
1407 /* This is the way the PowerPC specification defines it */
1408 float128 ft0_128, ft1_128;
1409
af12906f
AJ
1410 ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
1411 ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
7c58044c 1412 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
da1e7ac9
AJ
1413 if (unlikely(float128_is_infinity(ft0_128) && float64_is_infinity(farg3.d) &&
1414 float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
1415 /* Magnitude subtraction of infinities */
1416 farg1.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
1417 } else {
1418 ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
1419 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1420 farg1.d = float128_to_float64(ft0_128, &env->fp_status);
1421 }
e864cabd 1422#else
7c58044c 1423 /* This is OK on x86 hosts */
af12906f 1424 farg1.d = (farg1.d * farg2.d) - farg3.d;
e864cabd
JM
1425#endif
1426#else
af12906f
AJ
1427 farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
1428 farg1.d = float64_sub(farg1.d, farg3.d, &env->fp_status);
e864cabd 1429#endif
a44d2ce1 1430 if (likely(!float64_is_nan(farg1.d)))
af12906f 1431 farg1.d = float64_chs(farg1.d);
7c58044c 1432 }
af12906f 1433 return farg1.ll;
1ef59d0a
FB
1434}
1435
af12906f
AJ
1436/* frsp - frsp. */
1437uint64_t helper_frsp (uint64_t arg)
7c58044c 1438{
af12906f 1439 CPU_DoubleU farg;
6ad193ed 1440 float32 f32;
af12906f
AJ
1441 farg.ll = arg;
1442
1443#if USE_PRECISE_EMULATION
1444 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1445 /* sNaN round to single precision */
af12906f 1446 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
7c58044c 1447 } else {
6ad193ed
AJ
1448 f32 = float64_to_float32(farg.d, &env->fp_status);
1449 farg.d = float32_to_float64(f32, &env->fp_status);
7c58044c 1450 }
af12906f 1451#else
6ad193ed
AJ
1452 f32 = float64_to_float32(farg.d, &env->fp_status);
1453 farg.d = float32_to_float64(f32, &env->fp_status);
af12906f
AJ
1454#endif
1455 return farg.ll;
7c58044c 1456}
7c58044c 1457
af12906f
AJ
1458/* fsqrt - fsqrt. */
1459uint64_t helper_fsqrt (uint64_t arg)
9a64fbe4 1460{
af12906f
AJ
1461 CPU_DoubleU farg;
1462 farg.ll = arg;
1463
1464 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1465 /* sNaN square root */
af12906f 1466 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1467 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
7c58044c 1468 /* Square root of a negative nonzero number */
af12906f 1469 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
7c58044c 1470 } else {
af12906f 1471 farg.d = float64_sqrt(farg.d, &env->fp_status);
7c58044c 1472 }
af12906f 1473 return farg.ll;
9a64fbe4
FB
1474}
1475
af12906f
AJ
1476/* fre - fre. */
1477uint64_t helper_fre (uint64_t arg)
d7e4b87e 1478{
c609b12e 1479 CPU_DoubleU farg;
06f7332a 1480 farg.ll = arg;
d7e4b87e 1481
af12906f 1482 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1483 /* sNaN reciprocal */
af12906f 1484 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
d7e4b87e 1485 } else {
c609b12e 1486 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
d7e4b87e 1487 }
af12906f 1488 return farg.ll;
d7e4b87e
JM
1489}
1490
af12906f
AJ
1491/* fres - fres. */
1492uint64_t helper_fres (uint64_t arg)
9a64fbe4 1493{
06f7332a 1494 CPU_DoubleU farg;
6c01bf6c 1495 float32 f32;
06f7332a 1496 farg.ll = arg;
4ecc3190 1497
af12906f 1498 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1499 /* sNaN reciprocal */
af12906f 1500 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
4ecc3190 1501 } else {
c609b12e 1502 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
6c01bf6c
AJ
1503 f32 = float64_to_float32(farg.d, &env->fp_status);
1504 farg.d = float32_to_float64(f32, &env->fp_status);
4ecc3190 1505 }
af12906f 1506 return farg.ll;
9a64fbe4
FB
1507}
1508
af12906f
AJ
1509/* frsqrte - frsqrte. */
1510uint64_t helper_frsqrte (uint64_t arg)
9a64fbe4 1511{
c609b12e 1512 CPU_DoubleU farg;
6c01bf6c 1513 float32 f32;
06f7332a 1514 farg.ll = arg;
4ecc3190 1515
af12906f 1516 if (unlikely(float64_is_signaling_nan(farg.d))) {
7c58044c 1517 /* sNaN reciprocal square root */
af12906f 1518 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
f23c346e 1519 } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
7c58044c 1520 /* Reciprocal square root of a negative nonzero number */
af12906f 1521 farg.ll = fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
4ecc3190 1522 } else {
6c01bf6c 1523 farg.d = float64_sqrt(farg.d, &env->fp_status);
c609b12e 1524 farg.d = float64_div(float64_one, farg.d, &env->fp_status);
6c01bf6c
AJ
1525 f32 = float64_to_float32(farg.d, &env->fp_status);
1526 farg.d = float32_to_float64(f32, &env->fp_status);
4ecc3190 1527 }
af12906f 1528 return farg.ll;
9a64fbe4
FB
1529}
1530
af12906f
AJ
1531/* fsel - fsel. */
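/* Returns arg2 when arg1 is greater than or equal to zero (either sign of
 * zero qualifies) and is not a NaN; otherwise returns arg3. */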
1532uint64_t helper_fsel (uint64_t arg1, uint64_t arg2, uint64_t arg3)
9a64fbe4 1533{
6ad7365a 1534 CPU_DoubleU farg1;
af12906f
AJ
1535
1536 farg1.ll = arg1;
af12906f 1537
572c8952 1538 if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) && !float64_is_nan(farg1.d))
6ad7365a 1539 return arg2;
4ecc3190 1540 else
6ad7365a 1541 return arg3;
9a64fbe4
FB
1542}
1543
9a819377 1544void helper_fcmpu (uint64_t arg1, uint64_t arg2, uint32_t crfD)
9a64fbe4 1545{
af12906f 1546 CPU_DoubleU farg1, farg2;
e1571908 1547 uint32_t ret = 0;
af12906f
AJ
1548 farg1.ll = arg1;
1549 farg2.ll = arg2;
e1571908 1550
9a819377
AJ
1551 if (unlikely(float64_is_nan(farg1.d) ||
1552 float64_is_nan(farg2.d))) {
1553 ret = 0x01UL;
1554 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1555 ret = 0x08UL;
1556 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1557 ret = 0x04UL;
7c58044c 1558 } else {
9a819377 1559 ret = 0x02UL;
9a64fbe4 1560 }
9a819377 1561
7c58044c 1562 env->fpscr &= ~(0x0F << FPSCR_FPRF);
e1571908 1563 env->fpscr |= ret << FPSCR_FPRF;
9a819377
AJ
1564 env->crf[crfD] = ret;
1565 if (unlikely(ret == 0x01UL
1566 && (float64_is_signaling_nan(farg1.d) ||
1567 float64_is_signaling_nan(farg2.d)))) {
1568 /* sNaN comparison */
1569 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1570 }
9a64fbe4
FB
1571}
1572
9a819377 1573void helper_fcmpo (uint64_t arg1, uint64_t arg2, uint32_t crfD)
9a64fbe4 1574{
af12906f 1575 CPU_DoubleU farg1, farg2;
e1571908 1576 uint32_t ret = 0;
af12906f
AJ
1577 farg1.ll = arg1;
1578 farg2.ll = arg2;
e1571908 1579
af12906f
AJ
1580 if (unlikely(float64_is_nan(farg1.d) ||
1581 float64_is_nan(farg2.d))) {
9a819377
AJ
1582 ret = 0x01UL;
1583 } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
1584 ret = 0x08UL;
1585 } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
1586 ret = 0x04UL;
1587 } else {
1588 ret = 0x02UL;
1589 }
1590
1591 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1592 env->fpscr |= ret << FPSCR_FPRF;
1593 env->crf[crfD] = ret;
1594 if (unlikely (ret == 0x01UL)) {
af12906f
AJ
1595 if (float64_is_signaling_nan(farg1.d) ||
1596 float64_is_signaling_nan(farg2.d)) {
7c58044c
JM
1597 /* sNaN comparison */
1598 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1599 POWERPC_EXCP_FP_VXVC);
1600 } else {
1601 /* qNaN comparison */
1602 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1603 }
9a64fbe4 1604 }
9a64fbe4
FB
1605}
1606
76a66253 1607#if !defined (CONFIG_USER_ONLY)
6527f6ea 1608void helper_store_msr (target_ulong val)
0411a972 1609{
6527f6ea
AJ
1610 val = hreg_store_msr(env, val, 0);
1611 if (val != 0) {
0411a972 1612 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
e06fcd75 1613 helper_raise_exception(val);
0411a972
JM
1614 }
1615}
1616
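/* Common return-from-interrupt path: reload NIP (low two bits cleared) and
 * MSR masked by msrm; in 32-bit mode the upper MSR half may be preserved
 * (keep_msrh). The TB is always exited afterwards, so no explicit exception
 * needs to be raised here. */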
d72a19f7 1617static always_inline void do_rfi (target_ulong nip, target_ulong msr,
0411a972 1618 target_ulong msrm, int keep_msrh)
9a64fbe4 1619{
426613db 1620#if defined(TARGET_PPC64)
0411a972
JM
1621 if (msr & (1ULL << MSR_SF)) {
1622 nip = (uint64_t)nip;
1623 msr &= (uint64_t)msrm;
a42bd6cc 1624 } else {
0411a972
JM
1625 nip = (uint32_t)nip;
1626 msr = (uint32_t)(msr & msrm);
1627 if (keep_msrh)
1628 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
a42bd6cc 1629 }
426613db 1630#else
0411a972
JM
1631 nip = (uint32_t)nip;
1632 msr &= (uint32_t)msrm;
426613db 1633#endif
0411a972
JM
1634 /* XXX: beware: this is false if VLE is supported */
1635 env->nip = nip & ~((target_ulong)0x00000003);
a4f30719 1636 hreg_store_msr(env, msr, 1);
fdabc366 1637#if defined (DEBUG_OP)
0411a972 1638 cpu_dump_rfi(env->nip, env->msr);
fdabc366 1639#endif
0411a972
JM
1640 /* No need to raise an exception here,
1641 * as rfi is always the last insn of a TB
1642 */
fdabc366 1643 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
9a64fbe4 1644}
d9bce9d9 1645
d72a19f7 1646void helper_rfi (void)
0411a972 1647{
d72a19f7 1648 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
2ada0ed7 1649 ~((target_ulong)0x0), 1);
0411a972
JM
1650}
1651
d9bce9d9 1652#if defined(TARGET_PPC64)
d72a19f7 1653void helper_rfid (void)
426613db 1654{
d72a19f7 1655 do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
2ada0ed7 1656 ~((target_ulong)0x0), 0);
d9bce9d9 1657}
7863667f 1658
d72a19f7 1659void helper_hrfid (void)
be147d08 1660{
d72a19f7 1661 do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
2ada0ed7 1662 ~((target_ulong)0x0), 0);
be147d08
JM
1663}
1664#endif
76a66253 1665#endif
9a64fbe4 1666
cab3bee2 1667void helper_tw (target_ulong arg1, target_ulong arg2, uint32_t flags)
9a64fbe4 1668{
cab3bee2
AJ
1669 if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
1670 ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
1671 ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
1672 ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
1673 ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
e06fcd75 1674 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
a42bd6cc 1675 }
9a64fbe4
FB
1676}
1677
d9bce9d9 1678#if defined(TARGET_PPC64)
cab3bee2 1679void helper_td (target_ulong arg1, target_ulong arg2, uint32_t flags)
d9bce9d9 1680{
cab3bee2
AJ
1681 if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
1682 ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
1683 ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
1684 ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
1685 ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01)))))
e06fcd75 1686 helper_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
d9bce9d9
JM
1687}
1688#endif
1689
fdabc366 1690/*****************************************************************************/
76a66253 1691/* PowerPC 601 specific instructions (POWER bridge) */
9a64fbe4 1692
22e0e173 1693target_ulong helper_clcs (uint32_t arg)
9a64fbe4 1694{
22e0e173 1695 switch (arg) {
76a66253
JM
1696 case 0x0CUL:
1697 /* Instruction cache line size */
22e0e173 1698 return env->icache_line_size;
76a66253
JM
1699 break;
1700 case 0x0DUL:
1701 /* Data cache line size */
22e0e173 1702 return env->dcache_line_size;
76a66253
JM
1703 break;
1704 case 0x0EUL:
1705 /* Minimum cache line size */
22e0e173
AJ
1706 return (env->icache_line_size < env->dcache_line_size) ?
1707 env->icache_line_size : env->dcache_line_size;
76a66253
JM
1708 break;
1709 case 0x0FUL:
1710 /* Maximum cache line size */
22e0e173
AJ
1711 return (env->icache_line_size > env->dcache_line_size) ?
1712 env->icache_line_size : env->dcache_line_size;
76a66253
JM
1713 break;
1714 default:
1715 /* Undefined */
22e0e173 1716 return 0;
76a66253
JM
1717 break;
1718 }
1719}
1720
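/* POWER (601) divide: the 64-bit dividend is formed from rA (high word) and
 * the MQ register (low word); the quotient is returned and the remainder is
 * left in MQ. Divide overflow or division by zero yields INT32_MIN with MQ
 * cleared. */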
22e0e173 1721target_ulong helper_div (target_ulong arg1, target_ulong arg2)
76a66253 1722{
22e0e173 1723 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
76a66253 1724
22e0e173
AJ
1725 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1726 (int32_t)arg2 == 0) {
76a66253 1727 env->spr[SPR_MQ] = 0;
22e0e173 1728 return INT32_MIN;
76a66253 1729 } else {
22e0e173
AJ
1730 env->spr[SPR_MQ] = tmp % arg2;
1731 return tmp / (int32_t)arg2;
76a66253
JM
1732 }
1733}
1734
22e0e173 1735target_ulong helper_divo (target_ulong arg1, target_ulong arg2)
76a66253 1736{
22e0e173 1737 uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
76a66253 1738
22e0e173
AJ
1739 if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1740 (int32_t)arg2 == 0) {
3d7b417e 1741 env->xer |= (1 << XER_OV) | (1 << XER_SO);
76a66253 1742 env->spr[SPR_MQ] = 0;
22e0e173 1743 return INT32_MIN;
76a66253 1744 } else {
22e0e173
AJ
1745 env->spr[SPR_MQ] = tmp % arg2;
1746 tmp /= (int32_t)arg2;
1747 if ((int32_t)tmp != tmp) {
3d7b417e 1748 env->xer |= (1 << XER_OV) | (1 << XER_SO);
76a66253 1749 } else {
3d7b417e 1750 env->xer &= ~(1 << XER_OV);
76a66253 1751 }
22e0e173 1752 return tmp;
76a66253
JM
1753 }
1754}
1755
22e0e173 1756target_ulong helper_divs (target_ulong arg1, target_ulong arg2)
76a66253 1757{
22e0e173
AJ
1758 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1759 (int32_t)arg2 == 0) {
1760 env->spr[SPR_MQ] = 0;
1761 return INT32_MIN;
76a66253 1762 } else {
22e0e173
AJ
1763 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1764 return (int32_t)arg1 / (int32_t)arg2;
76a66253 1765 }
76a66253
JM
1766}
1767
22e0e173 1768target_ulong helper_divso (target_ulong arg1, target_ulong arg2)
76a66253 1769{
22e0e173
AJ
1770 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
1771 (int32_t)arg2 == 0) {
3d7b417e 1772 env->xer |= (1 << XER_OV) | (1 << XER_SO);
22e0e173
AJ
1773 env->spr[SPR_MQ] = 0;
1774 return INT32_MIN;
76a66253 1775 } else {
3d7b417e 1776 env->xer &= ~(1 << XER_OV);
22e0e173
AJ
1777 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
1778 return (int32_t)arg1 / (int32_t)arg2;
76a66253
JM
1779 }
1780}
1781
1782#if !defined (CONFIG_USER_ONLY)
22e0e173 1783target_ulong helper_rac (target_ulong addr)
76a66253 1784{
76a66253 1785 mmu_ctx_t ctx;
faadf50e 1786 int nb_BATs;
22e0e173 1787 target_ulong ret = 0;
76a66253
JM
1788
1789 /* We don't have to generate many instances of this instruction,
1790 * as rac is supervisor only.
1791 */
faadf50e
JM
1792 /* XXX: FIX THIS: Pretend we have no BAT */
1793 nb_BATs = env->nb_BATs;
1794 env->nb_BATs = 0;
22e0e173
AJ
1795 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0)
1796 ret = ctx.raddr;
faadf50e 1797 env->nb_BATs = nb_BATs;
22e0e173 1798 return ret;
76a66253
JM
1799}
1800
d72a19f7 1801void helper_rfsvc (void)
76a66253 1802{
d72a19f7 1803 do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
76a66253 1804}
76a66253
JM
1805#endif
1806
1807/*****************************************************************************/
1808/* 602 specific instructions */
 1809/* mfrom is the craziest instruction ever seen, imho! */
 1810/* The real implementation uses a ROM table; do the same. */
5e9ae189
AJ
 1811/* Extremely decomposed:
 1812 *
 1813 * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
 1814 */
db9a16a7 1815#if !defined (CONFIG_USER_ONLY)
cf02a65c 1816target_ulong helper_602_mfrom (target_ulong arg)
76a66253 1817{
cf02a65c 1818 if (likely(arg < 602)) {
76a66253 1819#include "mfrom_table.c"
45d827d2 1820 return mfrom_ROM_table[arg];
76a66253 1821 } else {
cf02a65c 1822 return 0;
76a66253
JM
1823 }
1824}
db9a16a7 1825#endif
76a66253
JM
1826
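For reference, mfrom_table.c holds the 602 pre-computed entries of the formula in the comment above. A small stand-alone program that would generate a table of that shape (illustration only; this is not the generator shipped with QEMU):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        int arg;

        for (arg = 0; arg < 602; arg++) {
            /* 256 * log10(10^(-arg / 256) + 1.0) + 0.5, truncated */
            double v = 256.0 * log10(pow(10.0, -arg / 256.0) + 1.0) + 0.5;
            printf("%4d,%c", (int)v, (arg % 8 == 7) ? '\n' : ' ');
        }
        printf("\n");
        return 0;
    }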
1827/*****************************************************************************/
1828/* Embedded PowerPC specific helpers */
76a66253 1829
a750fc0b 1830/* XXX: to be improved to check access rights when in user-mode */
06dca6a7 1831target_ulong helper_load_dcr (target_ulong dcrn)
a750fc0b 1832{
06dca6a7 1833 target_ulong val = 0;
a750fc0b
JM
1834
1835 if (unlikely(env->dcr_env == NULL)) {
93fcfe39 1836 qemu_log("No DCR environment\n");
e06fcd75
AJ
1837 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1838 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
06dca6a7 1839 } else if (unlikely(ppc_dcr_read(env->dcr_env, dcrn, &val) != 0)) {
93fcfe39 1840 qemu_log("DCR read error %d %03x\n", (int)dcrn, (int)dcrn);
e06fcd75
AJ
1841 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1842 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
a750fc0b 1843 }
06dca6a7 1844 return val;
a750fc0b
JM
1845}
1846
06dca6a7 1847void helper_store_dcr (target_ulong dcrn, target_ulong val)
a750fc0b
JM
1848{
1849 if (unlikely(env->dcr_env == NULL)) {
93fcfe39 1850 qemu_log("No DCR environment\n");
e06fcd75
AJ
1851 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1852 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
06dca6a7 1853 } else if (unlikely(ppc_dcr_write(env->dcr_env, dcrn, val) != 0)) {
93fcfe39 1854 qemu_log("DCR write error %d %03x\n", (int)dcrn, (int)dcrn);
e06fcd75
AJ
1855 helper_raise_exception_err(POWERPC_EXCP_PROGRAM,
1856 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
a750fc0b
JM
1857 }
1858}
1859
76a66253 1860#if !defined(CONFIG_USER_ONLY)
d72a19f7 1861void helper_40x_rfci (void)
76a66253 1862{
d72a19f7
AJ
1863 do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1864 ~((target_ulong)0xFFFF0000), 0);
a42bd6cc
JM
1865}
1866
d72a19f7 1867void helper_rfci (void)
a42bd6cc 1868{
d72a19f7
AJ
 1869 do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1870 ~((target_ulong)0x3FFF0000), 0);
a42bd6cc
JM
1871}
1872
d72a19f7 1873void helper_rfdi (void)
a42bd6cc 1874{
d72a19f7
AJ
 1875 do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1876 ~((target_ulong)0x3FFF0000), 0);
a42bd6cc
JM
1877}
1878
d72a19f7 1879void helper_rfmci (void)
a42bd6cc 1880{
d72a19f7
AJ
 1881 do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1882 ~((target_ulong)0x3FFF0000), 0);
76a66253 1883}
76a66253
JM
1884#endif
1885
1886/* 440 specific */
ef0d51af 1887target_ulong helper_dlmzb (target_ulong high, target_ulong low, uint32_t update_Rc)
76a66253
JM
1888{
1889 target_ulong mask;
1890 int i;
1891
1892 i = 1;
1893 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
ef0d51af
AJ
1894 if ((high & mask) == 0) {
1895 if (update_Rc) {
1896 env->crf[0] = 0x4;
1897 }
76a66253 1898 goto done;
ef0d51af 1899 }
76a66253
JM
1900 i++;
1901 }
1902 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
ef0d51af
AJ
1903 if ((low & mask) == 0) {
1904 if (update_Rc) {
1905 env->crf[0] = 0x8;
1906 }
1907 goto done;
1908 }
76a66253
JM
1909 i++;
1910 }
ef0d51af
AJ
1911 if (update_Rc) {
1912 env->crf[0] = 0x2;
1913 }
76a66253 1914 done:
ef0d51af
AJ
1915 env->xer = (env->xer & ~0x7F) | i;
1916 if (update_Rc) {
1917 env->crf[0] |= xer_so;
1918 }
1919 return i;
fdabc366
FB
1920}
1921
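A compact stand-alone equivalent of the byte scan performed by helper_dlmzb above (plain C, no CR/XER side effects): it returns the 1-based position of the first zero byte in the 8-byte string formed by high:low, mirroring the loop structure of the helper.

    #include <stdint.h>

    int dlmzb_scan(uint32_t high, uint32_t low)
    {
        uint64_t v = ((uint64_t)high << 32) | low;
        int i;

        for (i = 0; i < 8; i++) {
            if (((v >> (56 - 8 * i)) & 0xff) == 0) {
                break;                    /* found the leftmost zero byte */
            }
        }
        return i + 1;                     /* 1-based count, as in the helper */
    }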
d6a46fe8
AJ
1922/*****************************************************************************/
1923/* Altivec extension helpers */
1924#if defined(WORDS_BIGENDIAN)
1925#define HI_IDX 0
1926#define LO_IDX 1
1927#else
1928#define HI_IDX 1
1929#define LO_IDX 0
1930#endif
1931
1932#if defined(WORDS_BIGENDIAN)
1933#define VECTOR_FOR_INORDER_I(index, element) \
1934 for (index = 0; index < ARRAY_SIZE(r->element); index++)
1935#else
1936#define VECTOR_FOR_INORDER_I(index, element) \
1937 for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
1938#endif
1939
34ba2857
AJ
1940/* If X is a NaN, store the corresponding QNaN into RESULT. Otherwise,
1941 * execute the following block. */
1942#define DO_HANDLE_NAN(result, x) \
1943 if (float32_is_nan(x) || float32_is_signaling_nan(x)) { \
1944 CPU_FloatU __f; \
1945 __f.f = x; \
1946 __f.l = __f.l | (1 << 22); /* Set QNaN bit. */ \
1947 result = __f.f; \
1948 } else
1949
1950#define HANDLE_NAN1(result, x) \
1951 DO_HANDLE_NAN(result, x)
1952#define HANDLE_NAN2(result, x, y) \
1953 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y)
1954#define HANDLE_NAN3(result, x, y, z) \
1955 DO_HANDLE_NAN(result, x) DO_HANDLE_NAN(result, y) DO_HANDLE_NAN(result, z)
1956
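The DO_HANDLE_NAN block above propagates NaN inputs by forcing bit 22, the quiet bit of an IEEE single-precision fraction, so signaling NaNs come out quieted. A stand-alone sketch of the same bit manipulation (host floats and a hypothetical helper name, purely for illustration):

    #include <stdint.h>
    #include <string.h>

    float quiet_nan32(float x)
    {
        uint32_t bits;

        memcpy(&bits, &x, sizeof(bits));
        /* NaN: all exponent bits set and a non-zero fraction */
        if ((bits & 0x7f800000) == 0x7f800000 && (bits & 0x007fffff) != 0) {
            bits |= 1u << 22;             /* set the QNaN bit, as DO_HANDLE_NAN does */
            memcpy(&x, &bits, sizeof(x));
        }
        return x;
    }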
00d3b8f5
AJ
1957/* Saturating arithmetic helpers. */
1958#define SATCVT(from, to, from_type, to_type, min, max, use_min, use_max) \
1959 static always_inline to_type cvt##from##to (from_type x, int *sat) \
1960 { \
1961 to_type r; \
1962 if (use_min && x < min) { \
1963 r = min; \
1964 *sat = 1; \
1965 } else if (use_max && x > max) { \
1966 r = max; \
1967 *sat = 1; \
1968 } else { \
1969 r = x; \
1970 } \
1971 return r; \
1972 }
1973SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX, 1, 1)
1974SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX, 1, 1)
1975SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX, 1, 1)
c5b76b38
BS
1976
1977/* Work around gcc problems with the macro version */
1978static always_inline uint8_t cvtuhub(uint16_t x, int *sat)
1979{
1980 uint8_t r;
1981
1982 if (x > UINT8_MAX) {
1983 r = UINT8_MAX;
1984 *sat = 1;
1985 } else {
1986 r = x;
1987 }
1988 return r;
1989}
1990//SATCVT(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX, 0, 1)
00d3b8f5
AJ
1991SATCVT(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX, 0, 1)
1992SATCVT(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX, 0, 1)
1993SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX, 1, 1)
1994SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX, 1, 1)
1995SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX, 1, 1)
1996#undef SATCVT
1997
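Each SATCVT expansion above clamps a wider value into a narrower type and records saturation in *sat. A stand-alone equivalent of the generated cvtsdsw() conversion (hypothetical name, for illustration only):

    #include <stdint.h>

    int32_t sat_sd_to_sw(int64_t x, int *sat)
    {
        if (x < INT32_MIN) {
            *sat = 1;                     /* clamped to the minimum */
            return INT32_MIN;
        }
        if (x > INT32_MAX) {
            *sat = 1;                     /* clamped to the maximum */
            return INT32_MAX;
        }
        return (int32_t)x;                /* in range: value passes through */
    }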
cbfb6ae9
AJ
1998#define LVE(name, access, swap, element) \
1999 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2000 { \
2001 size_t n_elems = ARRAY_SIZE(r->element); \
2002 int adjust = HI_IDX*(n_elems-1); \
2003 int sh = sizeof(r->element[0]) >> 1; \
2004 int index = (addr & 0xf) >> sh; \
2005 if(msr_le) { \
2006 r->element[LO_IDX ? index : (adjust - index)] = swap(access(addr)); \
2007 } else { \
2008 r->element[LO_IDX ? index : (adjust - index)] = access(addr); \
2009 } \
2010 }
2011#define I(x) (x)
2012LVE(lvebx, ldub, I, u8)
2013LVE(lvehx, lduw, bswap16, u16)
2014LVE(lvewx, ldl, bswap32, u32)
2015#undef I
2016#undef LVE
2017
bf8d8ded
AJ
2018void helper_lvsl (ppc_avr_t *r, target_ulong sh)
2019{
2020 int i, j = (sh & 0xf);
2021
2022 VECTOR_FOR_INORDER_I (i, u8) {
2023 r->u8[i] = j++;
2024 }
2025}
2026
2027void helper_lvsr (ppc_avr_t *r, target_ulong sh)
2028{
2029 int i, j = 0x10 - (sh & 0xf);
2030
2031 VECTOR_FOR_INORDER_I (i, u8) {
2032 r->u8[i] = j++;
2033 }
2034}
2035
cbfb6ae9
AJ
2036#define STVE(name, access, swap, element) \
2037 void helper_##name (ppc_avr_t *r, target_ulong addr) \
2038 { \
2039 size_t n_elems = ARRAY_SIZE(r->element); \
2040 int adjust = HI_IDX*(n_elems-1); \
2041 int sh = sizeof(r->element[0]) >> 1; \
2042 int index = (addr & 0xf) >> sh; \
2043 if(msr_le) { \
2044 access(addr, swap(r->element[LO_IDX ? index : (adjust - index)])); \
2045 } else { \
2046 access(addr, r->element[LO_IDX ? index : (adjust - index)]); \
2047 } \
2048 }
2049#define I(x) (x)
2050STVE(stvebx, stb, I, u8)
2051STVE(stvehx, stw, bswap16, u16)
2052STVE(stvewx, stl, bswap32, u32)
2053#undef I
 2054#undef STVE
2055
6e87b7c7
AJ
2056void helper_mtvscr (ppc_avr_t *r)
2057{
2058#if defined(WORDS_BIGENDIAN)
2059 env->vscr = r->u32[3];
2060#else
2061 env->vscr = r->u32[0];
2062#endif
2063 set_flush_to_zero(vscr_nj, &env->vec_status);
2064}
2065
e343da72
AJ
2066void helper_vaddcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2067{
2068 int i;
2069 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2070 r->u32[i] = ~a->u32[i] < b->u32[i];
2071 }
2072}
2073
7872c51c
AJ
2074#define VARITH_DO(name, op, element) \
2075void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2076{ \
2077 int i; \
2078 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2079 r->element[i] = a->element[i] op b->element[i]; \
2080 } \
2081}
2082#define VARITH(suffix, element) \
2083 VARITH_DO(add##suffix, +, element) \
2084 VARITH_DO(sub##suffix, -, element)
2085VARITH(ubm, u8)
2086VARITH(uhm, u16)
2087VARITH(uwm, u32)
2088#undef VARITH_DO
2089#undef VARITH
2090
56fdd213
AJ
2091#define VARITHFP(suffix, func) \
2092 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2093 { \
2094 int i; \
2095 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2096 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2097 r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
2098 } \
2099 } \
2100 }
2101VARITHFP(addfp, float32_add)
2102VARITHFP(subfp, float32_sub)
2103#undef VARITHFP
2104
5ab09f33
AJ
2105#define VARITHSAT_CASE(type, op, cvt, element) \
2106 { \
2107 type result = (type)a->element[i] op (type)b->element[i]; \
2108 r->element[i] = cvt(result, &sat); \
2109 }
2110
2111#define VARITHSAT_DO(name, op, optype, cvt, element) \
2112 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2113 { \
2114 int sat = 0; \
2115 int i; \
2116 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2117 switch (sizeof(r->element[0])) { \
2118 case 1: VARITHSAT_CASE(optype, op, cvt, element); break; \
2119 case 2: VARITHSAT_CASE(optype, op, cvt, element); break; \
2120 case 4: VARITHSAT_CASE(optype, op, cvt, element); break; \
2121 } \
2122 } \
2123 if (sat) { \
2124 env->vscr |= (1 << VSCR_SAT); \
2125 } \
2126 }
2127#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
2128 VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
2129 VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
2130#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
2131 VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
2132 VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
2133VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
2134VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
2135VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
2136VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
2137VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
2138VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
2139#undef VARITHSAT_CASE
2140#undef VARITHSAT_DO
2141#undef VARITHSAT_SIGNED
2142#undef VARITHSAT_UNSIGNED
2143
fab3cbe9
AJ
2144#define VAVG_DO(name, element, etype) \
2145 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2146 { \
2147 int i; \
2148 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2149 etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
2150 r->element[i] = x >> 1; \
2151 } \
2152 }
2153
2154#define VAVG(type, signed_element, signed_type, unsigned_element, unsigned_type) \
2155 VAVG_DO(avgs##type, signed_element, signed_type) \
2156 VAVG_DO(avgu##type, unsigned_element, unsigned_type)
2157VAVG(b, s8, int16_t, u8, uint16_t)
2158VAVG(h, s16, int32_t, u16, uint32_t)
2159VAVG(w, s32, int64_t, u32, uint64_t)
2160#undef VAVG_DO
2161#undef VAVG
2162
e140632e
AJ
2163#define VCF(suffix, cvt, element) \
2164 void helper_vcf##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2165 { \
2166 int i; \
2167 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2168 float32 t = cvt(b->element[i], &env->vec_status); \
2169 r->f[i] = float32_scalbn (t, -uim, &env->vec_status); \
2170 } \
2171 }
2172VCF(ux, uint32_to_float32, u32)
2173VCF(sx, int32_to_float32, s32)
2174#undef VCF
2175
1add6e23
AJ
2176#define VCMP_DO(suffix, compare, element, record) \
2177 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2178 { \
2179 uint32_t ones = (uint32_t)-1; \
2180 uint32_t all = ones; \
2181 uint32_t none = 0; \
2182 int i; \
2183 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2184 uint32_t result = (a->element[i] compare b->element[i] ? ones : 0x0); \
2185 switch (sizeof (a->element[0])) { \
2186 case 4: r->u32[i] = result; break; \
2187 case 2: r->u16[i] = result; break; \
2188 case 1: r->u8[i] = result; break; \
2189 } \
2190 all &= result; \
2191 none |= result; \
2192 } \
2193 if (record) { \
2194 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2195 } \
2196 }
2197#define VCMP(suffix, compare, element) \
2198 VCMP_DO(suffix, compare, element, 0) \
2199 VCMP_DO(suffix##_dot, compare, element, 1)
2200VCMP(equb, ==, u8)
2201VCMP(equh, ==, u16)
2202VCMP(equw, ==, u32)
2203VCMP(gtub, >, u8)
2204VCMP(gtuh, >, u16)
2205VCMP(gtuw, >, u32)
2206VCMP(gtsb, >, s8)
2207VCMP(gtsh, >, s16)
2208VCMP(gtsw, >, s32)
2209#undef VCMP_DO
2210#undef VCMP
2211
819ca121
AJ
2212#define VCMPFP_DO(suffix, compare, order, record) \
2213 void helper_vcmp##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2214 { \
2215 uint32_t ones = (uint32_t)-1; \
2216 uint32_t all = ones; \
2217 uint32_t none = 0; \
2218 int i; \
2219 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2220 uint32_t result; \
2221 int rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status); \
2222 if (rel == float_relation_unordered) { \
2223 result = 0; \
2224 } else if (rel compare order) { \
2225 result = ones; \
2226 } else { \
2227 result = 0; \
2228 } \
2229 r->u32[i] = result; \
2230 all &= result; \
2231 none |= result; \
2232 } \
2233 if (record) { \
2234 env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
2235 } \
2236 }
2237#define VCMPFP(suffix, compare, order) \
2238 VCMPFP_DO(suffix, compare, order, 0) \
2239 VCMPFP_DO(suffix##_dot, compare, order, 1)
2240VCMPFP(eqfp, ==, float_relation_equal)
2241VCMPFP(gefp, !=, float_relation_less)
2242VCMPFP(gtfp, ==, float_relation_greater)
2243#undef VCMPFP_DO
2244#undef VCMPFP
2245
2246static always_inline void vcmpbfp_internal (ppc_avr_t *r, ppc_avr_t *a,
2247 ppc_avr_t *b, int record)
2248{
2249 int i;
2250 int all_in = 0;
2251 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2252 int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
2253 if (le_rel == float_relation_unordered) {
2254 r->u32[i] = 0xc0000000;
2255 /* ALL_IN does not need to be updated here. */
2256 } else {
2257 float32 bneg = float32_chs(b->f[i]);
2258 int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
2259 int le = le_rel != float_relation_greater;
2260 int ge = ge_rel != float_relation_less;
2261 r->u32[i] = ((!le) << 31) | ((!ge) << 30);
2262 all_in |= (!le | !ge);
2263 }
2264 }
2265 if (record) {
2266 env->crf[6] = (all_in == 0) << 1;
2267 }
2268}
2269
2270void helper_vcmpbfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2271{
2272 vcmpbfp_internal(r, a, b, 0);
2273}
2274
2275void helper_vcmpbfp_dot (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2276{
2277 vcmpbfp_internal(r, a, b, 1);
2278}
2279
875b31db
AJ
2280#define VCT(suffix, satcvt, element) \
2281 void helper_vct##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t uim) \
2282 { \
2283 int i; \
2284 int sat = 0; \
2285 float_status s = env->vec_status; \
2286 set_float_rounding_mode(float_round_to_zero, &s); \
2287 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2288 if (float32_is_nan(b->f[i]) || \
2289 float32_is_signaling_nan(b->f[i])) { \
2290 r->element[i] = 0; \
2291 } else { \
2292 float64 t = float32_to_float64(b->f[i], &s); \
2293 int64_t j; \
2294 t = float64_scalbn(t, uim, &s); \
2295 j = float64_to_int64(t, &s); \
2296 r->element[i] = satcvt(j, &sat); \
2297 } \
2298 } \
2299 if (sat) { \
2300 env->vscr |= (1 << VSCR_SAT); \
2301 } \
2302 }
2303VCT(uxs, cvtsduw, u32)
2304VCT(sxs, cvtsdsw, s32)
2305#undef VCT
2306
35cf7c7e
AJ
2307void helper_vmaddfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2308{
2309 int i;
2310 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2311 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
2312 /* Need to do the computation in higher precision and round
2313 * once at the end. */
2314 float64 af, bf, cf, t;
2315 af = float32_to_float64(a->f[i], &env->vec_status);
2316 bf = float32_to_float64(b->f[i], &env->vec_status);
2317 cf = float32_to_float64(c->f[i], &env->vec_status);
2318 t = float64_mul(af, cf, &env->vec_status);
2319 t = float64_add(t, bf, &env->vec_status);
2320 r->f[i] = float64_to_float32(t, &env->vec_status);
2321 }
2322 }
2323}
2324
b161ae27
AJ
2325void helper_vmhaddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2326{
2327 int sat = 0;
2328 int i;
2329
2330 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2331 int32_t prod = a->s16[i] * b->s16[i];
2332 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2333 r->s16[i] = cvtswsh (t, &sat);
2334 }
2335
2336 if (sat) {
2337 env->vscr |= (1 << VSCR_SAT);
2338 }
2339}
2340
2341void helper_vmhraddshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2342{
2343 int sat = 0;
2344 int i;
2345
2346 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2347 int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
2348 int32_t t = (int32_t)c->s16[i] + (prod >> 15);
2349 r->s16[i] = cvtswsh (t, &sat);
2350 }
2351
2352 if (sat) {
2353 env->vscr |= (1 << VSCR_SAT);
2354 }
2355}
2356
e4039339
AJ
2357#define VMINMAX_DO(name, compare, element) \
2358 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2359 { \
2360 int i; \
2361 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2362 if (a->element[i] compare b->element[i]) { \
2363 r->element[i] = b->element[i]; \
2364 } else { \
2365 r->element[i] = a->element[i]; \
2366 } \
2367 } \
2368 }
2369#define VMINMAX(suffix, element) \
2370 VMINMAX_DO(min##suffix, >, element) \
2371 VMINMAX_DO(max##suffix, <, element)
2372VMINMAX(sb, s8)
2373VMINMAX(sh, s16)
2374VMINMAX(sw, s32)
2375VMINMAX(ub, u8)
2376VMINMAX(uh, u16)
2377VMINMAX(uw, u32)
2378#undef VMINMAX_DO
2379#undef VMINMAX
2380
1536ff64
AJ
2381#define VMINMAXFP(suffix, rT, rF) \
2382 void helper_v##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2383 { \
2384 int i; \
2385 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2386 HANDLE_NAN2(r->f[i], a->f[i], b->f[i]) { \
2387 if (float32_lt_quiet(a->f[i], b->f[i], &env->vec_status)) { \
2388 r->f[i] = rT->f[i]; \
2389 } else { \
2390 r->f[i] = rF->f[i]; \
2391 } \
2392 } \
2393 } \
2394 }
2395VMINMAXFP(minfp, a, b)
2396VMINMAXFP(maxfp, b, a)
2397#undef VMINMAXFP
2398
bcd2ee23
AJ
2399void helper_vmladduhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2400{
2401 int i;
2402 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2403 int32_t prod = a->s16[i] * b->s16[i];
2404 r->s16[i] = (int16_t) (prod + c->s16[i]);
2405 }
2406}
2407
3b430048
AJ
2408#define VMRG_DO(name, element, highp) \
2409 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2410 { \
2411 ppc_avr_t result; \
2412 int i; \
2413 size_t n_elems = ARRAY_SIZE(r->element); \
2414 for (i = 0; i < n_elems/2; i++) { \
2415 if (highp) { \
2416 result.element[i*2+HI_IDX] = a->element[i]; \
2417 result.element[i*2+LO_IDX] = b->element[i]; \
2418 } else { \
2419 result.element[n_elems - i*2 - (1+HI_IDX)] = b->element[n_elems - i - 1]; \
2420 result.element[n_elems - i*2 - (1+LO_IDX)] = a->element[n_elems - i - 1]; \
2421 } \
2422 } \
2423 *r = result; \
2424 }
2425#if defined(WORDS_BIGENDIAN)
2426#define MRGHI 0
b392e756 2427#define MRGLO 1
3b430048
AJ
2428#else
2429#define MRGHI 1
2430#define MRGLO 0
2431#endif
2432#define VMRG(suffix, element) \
2433 VMRG_DO(mrgl##suffix, element, MRGHI) \
2434 VMRG_DO(mrgh##suffix, element, MRGLO)
2435VMRG(b, u8)
2436VMRG(h, u16)
2437VMRG(w, u32)
2438#undef VMRG_DO
2439#undef VMRG
2440#undef MRGHI
2441#undef MRGLO
2442
b04ae981
AJ
2443void helper_vmsummbm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2444{
2445 int32_t prod[16];
2446 int i;
2447
2448 for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
2449 prod[i] = (int32_t)a->s8[i] * b->u8[i];
2450 }
2451
2452 VECTOR_FOR_INORDER_I(i, s32) {
2453 r->s32[i] = c->s32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2454 }
2455}
2456
eae07261
AJ
2457void helper_vmsumshm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2458{
2459 int32_t prod[8];
2460 int i;
2461
2462 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2463 prod[i] = a->s16[i] * b->s16[i];
2464 }
2465
2466 VECTOR_FOR_INORDER_I(i, s32) {
2467 r->s32[i] = c->s32[i] + prod[2*i] + prod[2*i+1];
2468 }
2469}
2470
2471void helper_vmsumshs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2472{
2473 int32_t prod[8];
2474 int i;
2475 int sat = 0;
2476
2477 for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
2478 prod[i] = (int32_t)a->s16[i] * b->s16[i];
2479 }
2480
2481 VECTOR_FOR_INORDER_I (i, s32) {
2482 int64_t t = (int64_t)c->s32[i] + prod[2*i] + prod[2*i+1];
2483 r->u32[i] = cvtsdsw(t, &sat);
2484 }
2485
2486 if (sat) {
2487 env->vscr |= (1 << VSCR_SAT);
2488 }
2489}
2490
b04ae981
AJ
2491void helper_vmsumubm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2492{
2493 uint16_t prod[16];
2494 int i;
2495
2496 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2497 prod[i] = a->u8[i] * b->u8[i];
2498 }
2499
2500 VECTOR_FOR_INORDER_I(i, u32) {
2501 r->u32[i] = c->u32[i] + prod[4*i] + prod[4*i+1] + prod[4*i+2] + prod[4*i+3];
2502 }
2503}
2504
4d9903b6
AJ
2505void helper_vmsumuhm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2506{
2507 uint32_t prod[8];
2508 int i;
2509
2510 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2511 prod[i] = a->u16[i] * b->u16[i];
2512 }
2513
2514 VECTOR_FOR_INORDER_I(i, u32) {
2515 r->u32[i] = c->u32[i] + prod[2*i] + prod[2*i+1];
2516 }
2517}
2518
2519void helper_vmsumuhs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2520{
2521 uint32_t prod[8];
2522 int i;
2523 int sat = 0;
2524
2525 for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
2526 prod[i] = a->u16[i] * b->u16[i];
2527 }
2528
2529 VECTOR_FOR_INORDER_I (i, s32) {
2530 uint64_t t = (uint64_t)c->u32[i] + prod[2*i] + prod[2*i+1];
2531 r->u32[i] = cvtuduw(t, &sat);
2532 }
2533
2534 if (sat) {
2535 env->vscr |= (1 << VSCR_SAT);
2536 }
2537}
2538
2c277908
AJ
2539#define VMUL_DO(name, mul_element, prod_element, evenp) \
2540 void helper_v##name (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2541 { \
2542 int i; \
2543 VECTOR_FOR_INORDER_I(i, prod_element) { \
2544 if (evenp) { \
2545 r->prod_element[i] = a->mul_element[i*2+HI_IDX] * b->mul_element[i*2+HI_IDX]; \
2546 } else { \
2547 r->prod_element[i] = a->mul_element[i*2+LO_IDX] * b->mul_element[i*2+LO_IDX]; \
2548 } \
2549 } \
2550 }
2551#define VMUL(suffix, mul_element, prod_element) \
2552 VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
2553 VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
2554VMUL(sb, s8, s16)
2555VMUL(sh, s16, s32)
2556VMUL(ub, u8, u16)
2557VMUL(uh, u16, u32)
2558#undef VMUL_DO
2559#undef VMUL
2560
35cf7c7e
AJ
2561void helper_vnmsubfp (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2562{
2563 int i;
2564 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2565 HANDLE_NAN3(r->f[i], a->f[i], b->f[i], c->f[i]) {
 2566 /* Need to do the computation in higher precision and round
2567 * once at the end. */
2568 float64 af, bf, cf, t;
2569 af = float32_to_float64(a->f[i], &env->vec_status);
2570 bf = float32_to_float64(b->f[i], &env->vec_status);
2571 cf = float32_to_float64(c->f[i], &env->vec_status);
2572 t = float64_mul(af, cf, &env->vec_status);
2573 t = float64_sub(t, bf, &env->vec_status);
2574 t = float64_chs(t);
2575 r->f[i] = float64_to_float32(t, &env->vec_status);
2576 }
2577 }
2578}
2579
d1258698
AJ
2580void helper_vperm (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2581{
2582 ppc_avr_t result;
2583 int i;
2584 VECTOR_FOR_INORDER_I (i, u8) {
2585 int s = c->u8[i] & 0x1f;
2586#if defined(WORDS_BIGENDIAN)
2587 int index = s & 0xf;
2588#else
2589 int index = 15 - (s & 0xf);
2590#endif
2591 if (s & 0x10) {
2592 result.u8[i] = b->u8[index];
2593 } else {
2594 result.u8[i] = a->u8[index];
2595 }
2596 }
2597 *r = result;
2598}
2599
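In big-endian element order, the selection rule of helper_vperm above reduces to indexing the 32-byte concatenation a:b with the low five bits of each control byte. A plain-array sketch of that rule (stand-alone, not QEMU types, hypothetical name):

    #include <stdint.h>

    void vperm_sketch(uint8_t r[16], const uint8_t a[16],
                      const uint8_t b[16], const uint8_t c[16])
    {
        int i;

        for (i = 0; i < 16; i++) {
            int s = c[i] & 0x1f;          /* select one of 32 source bytes */
            r[i] = (s & 0x10) ? b[s & 0xf] : a[s & 0xf];
        }
    }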
5335a145
AJ
2600#if defined(WORDS_BIGENDIAN)
2601#define PKBIG 1
2602#else
2603#define PKBIG 0
2604#endif
1dd9ffb9
AJ
2605void helper_vpkpx (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2606{
2607 int i, j;
2608 ppc_avr_t result;
2609#if defined(WORDS_BIGENDIAN)
2610 const ppc_avr_t *x[2] = { a, b };
2611#else
2612 const ppc_avr_t *x[2] = { b, a };
2613#endif
2614
2615 VECTOR_FOR_INORDER_I (i, u64) {
2616 VECTOR_FOR_INORDER_I (j, u32){
2617 uint32_t e = x[i]->u32[j];
2618 result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
2619 ((e >> 6) & 0x3e0) |
2620 ((e >> 3) & 0x1f));
2621 }
2622 }
2623 *r = result;
2624}
2625
5335a145
AJ
2626#define VPK(suffix, from, to, cvt, dosat) \
2627 void helper_vpk##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2628 { \
2629 int i; \
2630 int sat = 0; \
2631 ppc_avr_t result; \
2632 ppc_avr_t *a0 = PKBIG ? a : b; \
2633 ppc_avr_t *a1 = PKBIG ? b : a; \
2634 VECTOR_FOR_INORDER_I (i, from) { \
2635 result.to[i] = cvt(a0->from[i], &sat); \
2636 result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
2637 } \
2638 *r = result; \
2639 if (dosat && sat) { \
2640 env->vscr |= (1 << VSCR_SAT); \
2641 } \
2642 }
2643#define I(x, y) (x)
2644VPK(shss, s16, s8, cvtshsb, 1)
2645VPK(shus, s16, u8, cvtshub, 1)
2646VPK(swss, s32, s16, cvtswsh, 1)
2647VPK(swus, s32, u16, cvtswuh, 1)
2648VPK(uhus, u16, u8, cvtuhub, 1)
2649VPK(uwus, u32, u16, cvtuwuh, 1)
2650VPK(uhum, u16, u8, I, 0)
2651VPK(uwum, u32, u16, I, 0)
2652#undef I
2653#undef VPK
2654#undef PKBIG
2655
bdfbac35
AJ
2656void helper_vrefp (ppc_avr_t *r, ppc_avr_t *b)
2657{
2658 int i;
2659 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2660 HANDLE_NAN1(r->f[i], b->f[i]) {
2661 r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
2662 }
2663 }
2664}
2665
f6b19645
AJ
2666#define VRFI(suffix, rounding) \
2667 void helper_vrfi##suffix (ppc_avr_t *r, ppc_avr_t *b) \
2668 { \
2669 int i; \
2670 float_status s = env->vec_status; \
2671 set_float_rounding_mode(rounding, &s); \
2672 for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
2673 HANDLE_NAN1(r->f[i], b->f[i]) { \
2674 r->f[i] = float32_round_to_int (b->f[i], &s); \
2675 } \
2676 } \
2677 }
2678VRFI(n, float_round_nearest_even)
2679VRFI(m, float_round_down)
2680VRFI(p, float_round_up)
2681VRFI(z, float_round_to_zero)
2682#undef VRFI
2683
5e1d0985
AJ
2684#define VROTATE(suffix, element) \
2685 void helper_vrl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2686 { \
2687 int i; \
2688 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2689 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2690 unsigned int shift = b->element[i] & mask; \
2691 r->element[i] = (a->element[i] << shift) | (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
2692 } \
2693 }
2694VROTATE(b, u8)
2695VROTATE(h, u16)
2696VROTATE(w, u32)
2697#undef VROTATE
2698
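Each VROTATE expansion rotates every element left by the count held in the corresponding element of b, masked to the element width. A stand-alone 32-bit rotate written so the zero-count case avoids a full-width shift (illustration only):

    #include <stdint.h>

    uint32_t rotl32(uint32_t x, unsigned count)
    {
        count &= 31;                      /* rotate count modulo the width */
        return (x << count) | (x >> ((32 - count) & 31));
    }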
071fc3b1
AJ
2699void helper_vrsqrtefp (ppc_avr_t *r, ppc_avr_t *b)
2700{
2701 int i;
2702 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2703 HANDLE_NAN1(r->f[i], b->f[i]) {
2704 float32 t = float32_sqrt(b->f[i], &env->vec_status);
2705 r->f[i] = float32_div(float32_one, t, &env->vec_status);
2706 }
2707 }
2708}
2709
d1258698
AJ
2710void helper_vsel (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
2711{
2712 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
2713 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
2714}
2715
b580763f 2716void helper_vlogefp (ppc_avr_t *r, ppc_avr_t *b)
f586ce09
AJ
2717{
2718 int i;
2719 for (i = 0; i < ARRAY_SIZE(r->f); i++) {
2720 HANDLE_NAN1(r->f[i], b->f[i]) {
2721 r->f[i] = float32_log2(b->f[i], &env->vec_status);
2722 }
2723 }
2724}
2725
d9430add
AJ
2726#if defined(WORDS_BIGENDIAN)
2727#define LEFT 0
2728#define RIGHT 1
2729#else
2730#define LEFT 1
2731#define RIGHT 0
2732#endif
 2733/* The specification says that the results are undefined if the
 2734 * shift counts are not all identical. We check that they are, to
 2735 * conform to what real hardware appears to do. */
2736#define VSHIFT(suffix, leftp) \
2737 void helper_vs##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2738 { \
1481e16a 2739 int shift = b->u8[LO_IDX*15] & 0x7; \
d9430add
AJ
2740 int doit = 1; \
2741 int i; \
2742 for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
2743 doit = doit && ((b->u8[i] & 0x7) == shift); \
2744 } \
2745 if (doit) { \
2746 if (shift == 0) { \
2747 *r = *a; \
2748 } else if (leftp) { \
2749 uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
2750 r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
2751 r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
2752 } else { \
2753 uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
2754 r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
2755 r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
2756 } \
2757 } \
2758 }
2759VSHIFT(l, LEFT)
2760VSHIFT(r, RIGHT)
2761#undef VSHIFT
2762#undef LEFT
2763#undef RIGHT
2764
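The VSHIFT expansions above shift the whole 128-bit register by 0-7 bits, carrying bits between the two 64-bit halves. A stand-alone sketch of the left-shift path using explicit hi/lo halves (illustration only):

    #include <stdint.h>

    void shl128_small(uint64_t *hi, uint64_t *lo, unsigned shift)
    {
        if (shift) {                      /* vsl uses a 0..7 bit count */
            uint64_t carry = *lo >> (64 - shift);

            *hi = (*hi << shift) | carry;
            *lo <<= shift;
        }
    }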
d79f0809
AJ
2765#define VSL(suffix, element) \
2766 void helper_vsl##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2767 { \
2768 int i; \
2769 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2770 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2771 unsigned int shift = b->element[i] & mask; \
2772 r->element[i] = a->element[i] << shift; \
2773 } \
2774 }
2775VSL(b, u8)
2776VSL(h, u16)
2777VSL(w, u32)
2778#undef VSL
2779
cd633b10
AJ
2780void helper_vsldoi (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
2781{
2782 int sh = shift & 0xf;
2783 int i;
2784 ppc_avr_t result;
2785
2786#if defined(WORDS_BIGENDIAN)
2787 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2788 int index = sh + i;
2789 if (index > 0xf) {
2790 result.u8[i] = b->u8[index-0x10];
2791 } else {
2792 result.u8[i] = a->u8[index];
2793 }
2794 }
2795#else
2796 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
2797 int index = (16 - sh) + i;
2798 if (index > 0xf) {
2799 result.u8[i] = a->u8[index-0x10];
2800 } else {
2801 result.u8[i] = b->u8[index];
2802 }
2803 }
2804#endif
2805 *r = result;
2806}
2807
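In the big-endian branch above, vsldoi simply takes 16 consecutive bytes of the 32-byte concatenation a:b starting at byte offset sh. A plain-array sketch of that indexing (stand-alone, hypothetical name):

    #include <stdint.h>

    void vsldoi_sketch(uint8_t r[16], const uint8_t a[16],
                       const uint8_t b[16], unsigned sh)
    {
        int i;

        sh &= 0xf;                        /* shift is a 4-bit byte offset */
        for (i = 0; i < 16; i++) {
            unsigned index = sh + i;
            r[i] = (index > 0xf) ? b[index - 0x10] : a[index];
        }
    }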
7b239bec
AJ
2808void helper_vslo (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2809{
2810 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2811
2812#if defined (WORDS_BIGENDIAN)
2813 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2814 memset (&r->u8[16-sh], 0, sh);
2815#else
2816 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2817 memset (&r->u8[0], 0, sh);
2818#endif
2819}
2820
e4e6bee7
AJ
2821/* Experimental testing shows that hardware masks the immediate. */
2822#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
2823#if defined(WORDS_BIGENDIAN)
2824#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
2825#else
2826#define SPLAT_ELEMENT(element) (ARRAY_SIZE(r->element)-1 - _SPLAT_MASKED(element))
2827#endif
2828#define VSPLT(suffix, element) \
2829 void helper_vsplt##suffix (ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
2830 { \
2831 uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
2832 int i; \
2833 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2834 r->element[i] = s; \
2835 } \
2836 }
2837VSPLT(b, u8)
2838VSPLT(h, u16)
2839VSPLT(w, u32)
2840#undef VSPLT
2841#undef SPLAT_ELEMENT
2842#undef _SPLAT_MASKED
2843
c026766b
AJ
2844#define VSPLTI(suffix, element, splat_type) \
2845 void helper_vspltis##suffix (ppc_avr_t *r, uint32_t splat) \
2846 { \
2847 splat_type x = (int8_t)(splat << 3) >> 3; \
2848 int i; \
2849 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2850 r->element[i] = x; \
2851 } \
2852 }
2853VSPLTI(b, s8, int8_t)
2854VSPLTI(h, s16, int16_t)
2855VSPLTI(w, s32, int32_t)
2856#undef VSPLTI
2857
07ef34c3
AJ
2858#define VSR(suffix, element) \
2859 void helper_vsr##suffix (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
2860 { \
2861 int i; \
2862 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
2863 unsigned int mask = ((1 << (3 + (sizeof (a->element[0]) >> 1))) - 1); \
2864 unsigned int shift = b->element[i] & mask; \
2865 r->element[i] = a->element[i] >> shift; \
2866 } \
2867 }
2868VSR(ab, s8)
2869VSR(ah, s16)
2870VSR(aw, s32)
2871VSR(b, u8)
2872VSR(h, u16)
2873VSR(w, u32)
2874#undef VSR
2875
7b239bec
AJ
2876void helper_vsro (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2877{
2878 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
2879
2880#if defined (WORDS_BIGENDIAN)
2881 memmove (&r->u8[sh], &a->u8[0], 16-sh);
2882 memset (&r->u8[0], 0, sh);
2883#else
2884 memmove (&r->u8[0], &a->u8[sh], 16-sh);
2885 memset (&r->u8[16-sh], 0, sh);
2886#endif
2887}
2888
e343da72
AJ
2889void helper_vsubcuw (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2890{
2891 int i;
2892 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2893 r->u32[i] = a->u32[i] >= b->u32[i];
2894 }
2895}
2896
8142cddd
AJ
2897void helper_vsumsws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2898{
2899 int64_t t;
2900 int i, upper;
2901 ppc_avr_t result;
2902 int sat = 0;
2903
2904#if defined(WORDS_BIGENDIAN)
2905 upper = ARRAY_SIZE(r->s32)-1;
2906#else
2907 upper = 0;
2908#endif
2909 t = (int64_t)b->s32[upper];
2910 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2911 t += a->s32[i];
2912 result.s32[i] = 0;
2913 }
2914 result.s32[upper] = cvtsdsw(t, &sat);
2915 *r = result;
2916
2917 if (sat) {
2918 env->vscr |= (1 << VSCR_SAT);
2919 }
2920}
2921
2922void helper_vsum2sws (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2923{
2924 int i, j, upper;
2925 ppc_avr_t result;
2926 int sat = 0;
2927
2928#if defined(WORDS_BIGENDIAN)
2929 upper = 1;
2930#else
2931 upper = 0;
2932#endif
2933 for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
2934 int64_t t = (int64_t)b->s32[upper+i*2];
2935 result.u64[i] = 0;
2936 for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
2937 t += a->s32[2*i+j];
2938 }
2939 result.s32[upper+i*2] = cvtsdsw(t, &sat);
2940 }
2941
2942 *r = result;
2943 if (sat) {
2944 env->vscr |= (1 << VSCR_SAT);
2945 }
2946}
2947
2948void helper_vsum4sbs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2949{
2950 int i, j;
2951 int sat = 0;
2952
2953 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2954 int64_t t = (int64_t)b->s32[i];
2955 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
2956 t += a->s8[4*i+j];
2957 }
2958 r->s32[i] = cvtsdsw(t, &sat);
2959 }
2960
2961 if (sat) {
2962 env->vscr |= (1 << VSCR_SAT);
2963 }
2964}
2965
2966void helper_vsum4shs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2967{
2968 int sat = 0;
2969 int i;
2970
2971 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
2972 int64_t t = (int64_t)b->s32[i];
2973 t += a->s16[2*i] + a->s16[2*i+1];
2974 r->s32[i] = cvtsdsw(t, &sat);
2975 }
2976
2977 if (sat) {
2978 env->vscr |= (1 << VSCR_SAT);
2979 }
2980}
2981
2982void helper_vsum4ubs (ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
2983{
2984 int i, j;
2985 int sat = 0;
2986
2987 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
2988 uint64_t t = (uint64_t)b->u32[i];
2989 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
2990 t += a->u8[4*i+j];
2991 }
2992 r->u32[i] = cvtuduw(t, &sat);
2993 }
2994
2995 if (sat) {
2996 env->vscr |= (1 << VSCR_SAT);
2997 }
2998}
2999
79f85c3a
AJ
3000#if defined(WORDS_BIGENDIAN)
3001#define UPKHI 1
3002#define UPKLO 0
3003#else
3004#define UPKHI 0
3005#define UPKLO 1
3006#endif
3007#define VUPKPX(suffix, hi) \
3008 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3009 { \
3010 int i; \
3011 ppc_avr_t result; \
3012 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
3013 uint16_t e = b->u16[hi ? i : i+4]; \
3014 uint8_t a = (e >> 15) ? 0xff : 0; \
3015 uint8_t r = (e >> 10) & 0x1f; \
3016 uint8_t g = (e >> 5) & 0x1f; \
3017 uint8_t b = e & 0x1f; \
3018 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
3019 } \
3020 *r = result; \
3021 }
3022VUPKPX(lpx, UPKLO)
3023VUPKPX(hpx, UPKHI)
3024#undef VUPKPX
3025
6cf1c6e5
AJ
3026#define VUPK(suffix, unpacked, packee, hi) \
3027 void helper_vupk##suffix (ppc_avr_t *r, ppc_avr_t *b) \
3028 { \
3029 int i; \
3030 ppc_avr_t result; \
3031 if (hi) { \
3032 for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
3033 result.unpacked[i] = b->packee[i]; \
3034 } \
3035 } else { \
3036 for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); i++) { \
3037 result.unpacked[i-ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
3038 } \
3039 } \
3040 *r = result; \
3041 }
3042VUPK(hsb, s16, s8, UPKHI)
3043VUPK(hsh, s32, s16, UPKHI)
3044VUPK(lsb, s16, s8, UPKLO)
3045VUPK(lsh, s32, s16, UPKLO)
3046#undef VUPK
79f85c3a
AJ
3047#undef UPKHI
3048#undef UPKLO
3049
34ba2857
AJ
3050#undef DO_HANDLE_NAN
3051#undef HANDLE_NAN1
3052#undef HANDLE_NAN2
3053#undef HANDLE_NAN3
d6a46fe8
AJ
3054#undef VECTOR_FOR_INORDER_I
3055#undef HI_IDX
3056#undef LO_IDX
3057
1c97856d 3058/*****************************************************************************/
0487d6a8
JM
3059/* SPE extension helpers */
3060/* Use a table to make this quicker */
3061static uint8_t hbrev[16] = {
3062 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
3063 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
3064};
3065
b068d6a7 3066static always_inline uint8_t byte_reverse (uint8_t val)
0487d6a8
JM
3067{
3068 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
3069}
3070
b068d6a7 3071static always_inline uint32_t word_reverse (uint32_t val)
0487d6a8
JM
3072{
3073 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
3074 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
3075}
3076
3cd7d1dd 3077#define MASKBITS 16 // Arbitrary value - to be fixed (implementation dependent)
57951c27 3078target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
0487d6a8
JM
3079{
3080 uint32_t a, b, d, mask;
3081
3cd7d1dd 3082 mask = UINT32_MAX >> (32 - MASKBITS);
57951c27
AJ
3083 a = arg1 & mask;
3084 b = arg2 & mask;
3cd7d1dd 3085 d = word_reverse(1 + word_reverse(a | ~b));
57951c27 3086 return (arg1 & ~mask) | (d & b);
0487d6a8
JM
3087}
3088
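helper_brinc increments arg1 in bit-reversed order within the bit positions selected by arg2, which is how SPE code steps through FFT butterflies. A stand-alone sketch of the same reverse-add-reverse trick with the 16-bit mask width the helper currently assumes (hypothetical names, illustration only):

    #include <stdint.h>

    static uint32_t reverse16(uint32_t v)
    {
        uint32_t r = 0;
        int i;

        for (i = 0; i < 16; i++) {        /* reverse the low 16 bits */
            r = (r << 1) | ((v >> i) & 1);
        }
        return r;
    }

    uint32_t brinc16(uint32_t arg1, uint32_t arg2)
    {
        uint32_t mask = 0xFFFF;
        uint32_t a = arg1 & mask;
        uint32_t b = arg2 & mask;
        /* add 1 in bit-reversed order, letting carries run through ~b bits */
        uint32_t d = reverse16(1 + reverse16(a | ~b)) & mask;

        return (arg1 & ~mask) | (d & b);
    }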
57951c27 3089uint32_t helper_cntlsw32 (uint32_t val)
0487d6a8
JM
3090{
3091 if (val & 0x80000000)
603fccce 3092 return clz32(~val);
0487d6a8 3093 else
603fccce 3094 return clz32(val);
0487d6a8
JM
3095}
3096
57951c27 3097uint32_t helper_cntlzw32 (uint32_t val)
0487d6a8 3098{
603fccce 3099 return clz32(val);
0487d6a8
JM
3100}
3101
1c97856d
AJ
3102/* Single-precision floating-point conversions */
3103static always_inline uint32_t efscfsi (uint32_t val)
0487d6a8 3104{
0ca9d380 3105 CPU_FloatU u;
0487d6a8 3106
fbd265b6 3107 u.f = int32_to_float32(val, &env->vec_status);
0487d6a8 3108
0ca9d380 3109 return u.l;
0487d6a8
JM
3110}
3111
1c97856d 3112static always_inline uint32_t efscfui (uint32_t val)
0487d6a8 3113{
0ca9d380 3114 CPU_FloatU u;
0487d6a8 3115
fbd265b6 3116 u.f = uint32_to_float32(val, &env->vec_status);
0487d6a8 3117
0ca9d380 3118 return u.l;
0487d6a8
JM
3119}
3120
1c97856d 3121static always_inline int32_t efsctsi (uint32_t val)
0487d6a8 3122{
0ca9d380 3123 CPU_FloatU u;
0487d6a8 3124
0ca9d380 3125 u.l = val;
0487d6a8 3126 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3127 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3128 return 0;
3129
fbd265b6 3130 return float32_to_int32(u.f, &env->vec_status);
0487d6a8
JM
3131}
3132
1c97856d 3133static always_inline uint32_t efsctui (uint32_t val)
0487d6a8 3134{
0ca9d380 3135 CPU_FloatU u;
0487d6a8 3136
0ca9d380 3137 u.l = val;
0487d6a8 3138 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3139 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3140 return 0;
3141
fbd265b6 3142 return float32_to_uint32(u.f, &env->vec_status);
0487d6a8
JM
3143}
3144
1c97856d 3145static always_inline uint32_t efsctsiz (uint32_t val)
0487d6a8 3146{
0ca9d380 3147 CPU_FloatU u;
0487d6a8 3148
0ca9d380 3149 u.l = val;
0487d6a8 3150 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3151 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3152 return 0;
3153
fbd265b6 3154 return float32_to_int32_round_to_zero(u.f, &env->vec_status);
0487d6a8
JM
3155}
3156
1c97856d 3157static always_inline uint32_t efsctuiz (uint32_t val)
0487d6a8 3158{
0ca9d380 3159 CPU_FloatU u;
0487d6a8 3160
0ca9d380 3161 u.l = val;
0487d6a8 3162 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3163 if (unlikely(float32_is_nan(u.f)))
0487d6a8
JM
3164 return 0;
3165
fbd265b6 3166 return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
0487d6a8
JM
3167}
3168
1c97856d 3169static always_inline uint32_t efscfsf (uint32_t val)
0487d6a8 3170{
0ca9d380 3171 CPU_FloatU u;
0487d6a8
JM
3172 float32 tmp;
3173
fbd265b6
AJ
3174 u.f = int32_to_float32(val, &env->vec_status);
3175 tmp = int64_to_float32(1ULL << 32, &env->vec_status);
3176 u.f = float32_div(u.f, tmp, &env->vec_status);
0487d6a8 3177
0ca9d380 3178 return u.l;
0487d6a8
JM
3179}
3180
1c97856d 3181static always_inline uint32_t efscfuf (uint32_t val)
0487d6a8 3182{
0ca9d380 3183 CPU_FloatU u;
0487d6a8
JM
3184 float32 tmp;
3185
fbd265b6
AJ
3186 u.f = uint32_to_float32(val, &env->vec_status);
3187 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3188 u.f = float32_div(u.f, tmp, &env->vec_status);
0487d6a8 3189
0ca9d380 3190 return u.l;
0487d6a8
JM
3191}
3192
1c97856d 3193static always_inline uint32_t efsctsf (uint32_t val)
0487d6a8 3194{
0ca9d380 3195 CPU_FloatU u;
0487d6a8
JM
3196 float32 tmp;
3197
0ca9d380 3198 u.l = val;
0487d6a8 3199 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3200 if (unlikely(float32_is_nan(u.f)))
0487d6a8 3201 return 0;
fbd265b6
AJ
3202 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3203 u.f = float32_mul(u.f, tmp, &env->vec_status);
0487d6a8 3204
fbd265b6 3205 return float32_to_int32(u.f, &env->vec_status);
0487d6a8
JM
3206}
3207
1c97856d 3208static always_inline uint32_t efsctuf (uint32_t val)
0487d6a8 3209{
0ca9d380 3210 CPU_FloatU u;
0487d6a8
JM
3211 float32 tmp;
3212
0ca9d380 3213 u.l = val;
0487d6a8 3214 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3215 if (unlikely(float32_is_nan(u.f)))
0487d6a8 3216 return 0;
fbd265b6
AJ
3217 tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
3218 u.f = float32_mul(u.f, tmp, &env->vec_status);
0487d6a8 3219
fbd265b6 3220 return float32_to_uint32(u.f, &env->vec_status);
0487d6a8
JM
3221}
3222
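The efscfsf/efsctsf pair above treats a 32-bit operand as a fraction scaled by 2^32, so the conversions reduce to a divide or multiply by 2^32 around the float conversion. A stand-alone sketch using host doubles instead of softfloat (NaN and overflow handling omitted, hypothetical names):

    #include <stdint.h>

    /* signed fractional -> float: the value represented is sf / 2^32 */
    float sf_to_float(int32_t sf)
    {
        return (float)((double)sf / 4294967296.0);
    }

    /* float -> signed fractional: the result is f * 2^32 */
    int32_t float_to_sf(float f)
    {
        return (int32_t)((double)f * 4294967296.0);
    }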
1c97856d
AJ
3223#define HELPER_SPE_SINGLE_CONV(name) \
3224uint32_t helper_e##name (uint32_t val) \
3225{ \
3226 return e##name(val); \
3227}
3228/* efscfsi */
3229HELPER_SPE_SINGLE_CONV(fscfsi);
3230/* efscfui */
3231HELPER_SPE_SINGLE_CONV(fscfui);
3232/* efscfuf */
3233HELPER_SPE_SINGLE_CONV(fscfuf);
3234/* efscfsf */
3235HELPER_SPE_SINGLE_CONV(fscfsf);
3236/* efsctsi */
3237HELPER_SPE_SINGLE_CONV(fsctsi);
3238/* efsctui */
3239HELPER_SPE_SINGLE_CONV(fsctui);
3240/* efsctsiz */
3241HELPER_SPE_SINGLE_CONV(fsctsiz);
3242/* efsctuiz */
3243HELPER_SPE_SINGLE_CONV(fsctuiz);
3244/* efsctsf */
3245HELPER_SPE_SINGLE_CONV(fsctsf);
3246/* efsctuf */
3247HELPER_SPE_SINGLE_CONV(fsctuf);
3248
3249#define HELPER_SPE_VECTOR_CONV(name) \
3250uint64_t helper_ev##name (uint64_t val) \
3251{ \
3252 return ((uint64_t)e##name(val >> 32) << 32) | \
3253 (uint64_t)e##name(val); \
0487d6a8 3254}
1c97856d
AJ
3255/* evfscfsi */
3256HELPER_SPE_VECTOR_CONV(fscfsi);
3257/* evfscfui */
3258HELPER_SPE_VECTOR_CONV(fscfui);
3259/* evfscfuf */
3260HELPER_SPE_VECTOR_CONV(fscfuf);
3261/* evfscfsf */
3262HELPER_SPE_VECTOR_CONV(fscfsf);
3263/* evfsctsi */
3264HELPER_SPE_VECTOR_CONV(fsctsi);
3265/* evfsctui */
3266HELPER_SPE_VECTOR_CONV(fsctui);
3267/* evfsctsiz */
3268HELPER_SPE_VECTOR_CONV(fsctsiz);
3269/* evfsctuiz */
3270HELPER_SPE_VECTOR_CONV(fsctuiz);
3271/* evfsctsf */
3272HELPER_SPE_VECTOR_CONV(fsctsf);
3273/* evfsctuf */
3274HELPER_SPE_VECTOR_CONV(fsctuf);
0487d6a8 3275
1c97856d
AJ
3276/* Single-precision floating-point arithmetic */
3277static always_inline uint32_t efsadd (uint32_t op1, uint32_t op2)
0487d6a8 3278{
1c97856d
AJ
3279 CPU_FloatU u1, u2;
3280 u1.l = op1;
3281 u2.l = op2;
fbd265b6 3282 u1.f = float32_add(u1.f, u2.f, &env->vec_status);
1c97856d 3283 return u1.l;
0487d6a8
JM
3284}
3285
1c97856d 3286static always_inline uint32_t efssub (uint32_t op1, uint32_t op2)
0487d6a8 3287{
1c97856d
AJ
3288 CPU_FloatU u1, u2;
3289 u1.l = op1;
3290 u2.l = op2;
fbd265b6 3291 u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
1c97856d 3292 return u1.l;
0487d6a8
JM
3293}
3294
1c97856d 3295static always_inline uint32_t efsmul (uint32_t op1, uint32_t op2)
0487d6a8 3296{
1c97856d
AJ
3297 CPU_FloatU u1, u2;
3298 u1.l = op1;
3299 u2.l = op2;
fbd265b6 3300 u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
1c97856d 3301 return u1.l;
0487d6a8
JM
3302}
3303
1c97856d 3304static always_inline uint32_t efsdiv (uint32_t op1, uint32_t op2)
0487d6a8 3305{
1c97856d
AJ
3306 CPU_FloatU u1, u2;
3307 u1.l = op1;
3308 u2.l = op2;
fbd265b6 3309 u1.f = float32_div(u1.f, u2.f, &env->vec_status);
1c97856d 3310 return u1.l;
0487d6a8
JM
3311}
3312
1c97856d
AJ
3313#define HELPER_SPE_SINGLE_ARITH(name) \
3314uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3315{ \
3316 return e##name(op1, op2); \
3317}
3318/* efsadd */
3319HELPER_SPE_SINGLE_ARITH(fsadd);
3320/* efssub */
3321HELPER_SPE_SINGLE_ARITH(fssub);
3322/* efsmul */
3323HELPER_SPE_SINGLE_ARITH(fsmul);
3324/* efsdiv */
3325HELPER_SPE_SINGLE_ARITH(fsdiv);
3326
3327#define HELPER_SPE_VECTOR_ARITH(name) \
3328uint64_t helper_ev##name (uint64_t op1, uint64_t op2) \
3329{ \
3330 return ((uint64_t)e##name(op1 >> 32, op2 >> 32) << 32) | \
3331 (uint64_t)e##name(op1, op2); \
3332}
3333/* evfsadd */
3334HELPER_SPE_VECTOR_ARITH(fsadd);
3335/* evfssub */
3336HELPER_SPE_VECTOR_ARITH(fssub);
3337/* evfsmul */
3338HELPER_SPE_VECTOR_ARITH(fsmul);
3339/* evfsdiv */
3340HELPER_SPE_VECTOR_ARITH(fsdiv);
3341
3342/* Single-precision floating-point comparisons */
3343static always_inline uint32_t efststlt (uint32_t op1, uint32_t op2)
0487d6a8 3344{
1c97856d
AJ
3345 CPU_FloatU u1, u2;
3346 u1.l = op1;
3347 u2.l = op2;
fbd265b6 3348 return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3349}
3350
1c97856d 3351static always_inline uint32_t efststgt (uint32_t op1, uint32_t op2)
0487d6a8 3352{
1c97856d
AJ
3353 CPU_FloatU u1, u2;
3354 u1.l = op1;
3355 u2.l = op2;
fbd265b6 3356 return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
0487d6a8
JM
3357}
3358
1c97856d 3359static always_inline uint32_t efststeq (uint32_t op1, uint32_t op2)
0487d6a8 3360{
1c97856d
AJ
3361 CPU_FloatU u1, u2;
3362 u1.l = op1;
3363 u2.l = op2;
fbd265b6 3364 return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3365}
3366
1c97856d 3367static always_inline uint32_t efscmplt (uint32_t op1, uint32_t op2)
0487d6a8
JM
3368{
 3369 /* XXX: TODO: test special values (NaN, infinities, ...) */
1c97856d 3370 return efststlt(op1, op2);
0487d6a8
JM
3371}
3372
1c97856d 3373static always_inline uint32_t efscmpgt (uint32_t op1, uint32_t op2)
0487d6a8
JM
3374{
 3375 /* XXX: TODO: test special values (NaN, infinities, ...) */
1c97856d 3376 return efststgt(op1, op2);
0487d6a8
JM
3377}
3378
1c97856d 3379static always_inline uint32_t efscmpeq (uint32_t op1, uint32_t op2)
0487d6a8
JM
3380{
 3381 /* XXX: TODO: test special values (NaN, infinities, ...) */
1c97856d 3382 return efststeq(op1, op2);
0487d6a8
JM
3383}
3384
1c97856d
AJ
3385#define HELPER_SINGLE_SPE_CMP(name) \
3386uint32_t helper_e##name (uint32_t op1, uint32_t op2) \
3387{ \
3388 return e##name(op1, op2) << 2; \
3389}
3390/* efststlt */
3391HELPER_SINGLE_SPE_CMP(fststlt);
3392/* efststgt */
3393HELPER_SINGLE_SPE_CMP(fststgt);
3394/* efststeq */
3395HELPER_SINGLE_SPE_CMP(fststeq);
3396/* efscmplt */
3397HELPER_SINGLE_SPE_CMP(fscmplt);
3398/* efscmpgt */
3399HELPER_SINGLE_SPE_CMP(fscmpgt);
3400/* efscmpeq */
3401HELPER_SINGLE_SPE_CMP(fscmpeq);
3402
3403static always_inline uint32_t evcmp_merge (int t0, int t1)
0487d6a8 3404{
1c97856d 3405 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
0487d6a8
JM
3406}
3407
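evcmp_merge packs the per-half comparison results into a CRF-style nibble: bit 3 is the high-half result, bit 2 the low-half result, bit 1 their OR and bit 0 their AND. A tiny stand-alone check of that encoding (illustration only):

    #include <assert.h>

    static unsigned merge(int t0, int t1)
    {
        return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
    }

    int main(void)
    {
        assert(merge(0, 0) == 0x0);       /* neither half true */
        assert(merge(1, 0) == 0xA);       /* high half only */
        assert(merge(0, 1) == 0x6);       /* low half only */
        assert(merge(1, 1) == 0xF);       /* both halves true */
        return 0;
    }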
1c97856d
AJ
3408#define HELPER_VECTOR_SPE_CMP(name) \
3409uint32_t helper_ev##name (uint64_t op1, uint64_t op2) \
3410{ \
3411 return evcmp_merge(e##name(op1 >> 32, op2 >> 32), e##name(op1, op2)); \
0487d6a8 3412}
1c97856d
AJ
3413/* evfststlt */
3414HELPER_VECTOR_SPE_CMP(fststlt);
3415/* evfststgt */
3416HELPER_VECTOR_SPE_CMP(fststgt);
3417/* evfststeq */
3418HELPER_VECTOR_SPE_CMP(fststeq);
3419/* evfscmplt */
3420HELPER_VECTOR_SPE_CMP(fscmplt);
3421/* evfscmpgt */
3422HELPER_VECTOR_SPE_CMP(fscmpgt);
3423/* evfscmpeq */
3424HELPER_VECTOR_SPE_CMP(fscmpeq);
0487d6a8 3425
1c97856d
AJ
3426/* Double-precision floating-point conversion */
3427uint64_t helper_efdcfsi (uint32_t val)
0487d6a8 3428{
1c97856d
AJ
3429 CPU_DoubleU u;
3430
fbd265b6 3431 u.d = int32_to_float64(val, &env->vec_status);
1c97856d
AJ
3432
3433 return u.ll;
0487d6a8
JM
3434}
3435
1c97856d 3436uint64_t helper_efdcfsid (uint64_t val)
0487d6a8 3437{
0ca9d380 3438 CPU_DoubleU u;
0487d6a8 3439
fbd265b6 3440 u.d = int64_to_float64(val, &env->vec_status);
0487d6a8 3441
0ca9d380 3442 return u.ll;
0487d6a8
JM
3443}
3444
1c97856d
AJ
3445uint64_t helper_efdcfui (uint32_t val)
3446{
3447 CPU_DoubleU u;
3448
fbd265b6 3449 u.d = uint32_to_float64(val, &env->vec_status);
1c97856d
AJ
3450
3451 return u.ll;
3452}
3453
3454uint64_t helper_efdcfuid (uint64_t val)
0487d6a8 3455{
0ca9d380 3456 CPU_DoubleU u;
0487d6a8 3457
fbd265b6 3458 u.d = uint64_to_float64(val, &env->vec_status);
0487d6a8 3459
0ca9d380 3460 return u.ll;
0487d6a8
JM
3461}
3462
1c97856d 3463uint32_t helper_efdctsi (uint64_t val)
0487d6a8 3464{
0ca9d380 3465 CPU_DoubleU u;
0487d6a8 3466
0ca9d380 3467 u.ll = val;
0487d6a8 3468 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3469 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3470 return 0;
3471
fbd265b6 3472 return float64_to_int32(u.d, &env->vec_status);
0487d6a8
JM
3473}
3474
1c97856d 3475uint32_t helper_efdctui (uint64_t val)
0487d6a8 3476{
0ca9d380 3477 CPU_DoubleU u;
0487d6a8 3478
0ca9d380 3479 u.ll = val;
0487d6a8 3480 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3481 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3482 return 0;
3483
fbd265b6 3484 return float64_to_uint32(u.d, &env->vec_status);
0487d6a8
JM
3485}
3486
1c97856d 3487uint32_t helper_efdctsiz (uint64_t val)
0487d6a8 3488{
0ca9d380 3489 CPU_DoubleU u;
0487d6a8 3490
0ca9d380 3491 u.ll = val;
0487d6a8 3492 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3493 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3494 return 0;
3495
fbd265b6 3496 return float64_to_int32_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3497}
3498
1c97856d 3499uint64_t helper_efdctsidz (uint64_t val)
0487d6a8 3500{
0ca9d380 3501 CPU_DoubleU u;
0487d6a8 3502
0ca9d380 3503 u.ll = val;
0487d6a8 3504 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3505 if (unlikely(float64_is_nan(u.d)))
0487d6a8
JM
3506 return 0;
3507
fbd265b6 3508 return float64_to_int64_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3509}
3510
1c97856d 3511uint32_t helper_efdctuiz (uint64_t val)
0487d6a8 3512{
1c97856d 3513 CPU_DoubleU u;
0487d6a8 3514
1c97856d
AJ
3515 u.ll = val;
 3516 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3517 if (unlikely(float64_is_nan(u.d)))
1c97856d 3518 return 0;
0487d6a8 3519
fbd265b6 3520 return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3521}
3522
1c97856d 3523uint64_t helper_efdctuidz (uint64_t val)
0487d6a8 3524{
1c97856d 3525 CPU_DoubleU u;
0487d6a8 3526
1c97856d
AJ
3527 u.ll = val;
 3528 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3529 if (unlikely(float64_is_nan(u.d)))
1c97856d 3530 return 0;
0487d6a8 3531
fbd265b6 3532 return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
0487d6a8
JM
3533}
3534
1c97856d 3535uint64_t helper_efdcfsf (uint32_t val)
0487d6a8 3536{
0ca9d380 3537 CPU_DoubleU u;
0487d6a8
JM
3538 float64 tmp;
3539
fbd265b6
AJ
3540 u.d = int32_to_float64(val, &env->vec_status);
3541 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3542 u.d = float64_div(u.d, tmp, &env->vec_status);
0487d6a8 3543
0ca9d380 3544 return u.ll;
0487d6a8
JM
3545}
3546
1c97856d 3547uint64_t helper_efdcfuf (uint32_t val)
0487d6a8 3548{
0ca9d380 3549 CPU_DoubleU u;
0487d6a8
JM
3550 float64 tmp;
3551
fbd265b6
AJ
3552 u.d = uint32_to_float64(val, &env->vec_status);
3553 tmp = int64_to_float64(1ULL << 32, &env->vec_status);
3554 u.d = float64_div(u.d, tmp, &env->vec_status);
0487d6a8 3555
0ca9d380 3556 return u.ll;
0487d6a8
JM
3557}
3558
1c97856d 3559uint32_t helper_efdctsf (uint64_t val)
0487d6a8 3560{
0ca9d380 3561 CPU_DoubleU u;
0487d6a8
JM
3562 float64 tmp;
3563
0ca9d380 3564 u.ll = val;
0487d6a8 3565 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3566 if (unlikely(float64_is_nan(u.d)))
0487d6a8 3567 return 0;
fbd265b6
AJ
3568 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3569 u.d = float64_mul(u.d, tmp, &env->vec_status);
0487d6a8 3570
fbd265b6 3571 return float64_to_int32(u.d, &env->vec_status);
0487d6a8
JM
3572}
3573
1c97856d 3574uint32_t helper_efdctuf (uint64_t val)
0487d6a8 3575{
0ca9d380 3576 CPU_DoubleU u;
0487d6a8
JM
3577 float64 tmp;
3578
0ca9d380 3579 u.ll = val;
0487d6a8 3580 /* NaNs are not treated the same way IEEE 754 does */
a44d2ce1 3581 if (unlikely(float64_is_nan(u.d)))
0487d6a8 3582 return 0;
fbd265b6
AJ
3583 tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
3584 u.d = float64_mul(u.d, tmp, &env->vec_status);
0487d6a8 3585
fbd265b6 3586 return float64_to_uint32(u.d, &env->vec_status);
0487d6a8
JM
3587}
3588
1c97856d 3589uint32_t helper_efscfd (uint64_t val)
0487d6a8 3590{
0ca9d380
AJ
3591 CPU_DoubleU u1;
3592 CPU_FloatU u2;
0487d6a8 3593
0ca9d380 3594 u1.ll = val;
fbd265b6 3595 u2.f = float64_to_float32(u1.d, &env->vec_status);
0487d6a8 3596
0ca9d380 3597 return u2.l;
0487d6a8
JM
3598}
3599
1c97856d 3600uint64_t helper_efdcfs (uint32_t val)
0487d6a8 3601{
0ca9d380
AJ
3602 CPU_DoubleU u2;
3603 CPU_FloatU u1;
0487d6a8 3604
0ca9d380 3605 u1.l = val;
fbd265b6 3606 u2.d = float32_to_float64(u1.f, &env->vec_status);
0487d6a8 3607
0ca9d380 3608 return u2.ll;
0487d6a8
JM
3609}
3610
1c97856d
AJ
 3611/* Double-precision floating-point arithmetic */
3612uint64_t helper_efdadd (uint64_t op1, uint64_t op2)
0487d6a8 3613{
1c97856d
AJ
3614 CPU_DoubleU u1, u2;
3615 u1.ll = op1;
3616 u2.ll = op2;
fbd265b6 3617 u1.d = float64_add(u1.d, u2.d, &env->vec_status);
1c97856d 3618 return u1.ll;
0487d6a8
JM
3619}
3620
1c97856d 3621uint64_t helper_efdsub (uint64_t op1, uint64_t op2)
0487d6a8 3622{
1c97856d
AJ
3623 CPU_DoubleU u1, u2;
3624 u1.ll = op1;
3625 u2.ll = op2;
fbd265b6 3626 u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
1c97856d 3627 return u1.ll;
0487d6a8
JM
3628}
3629
1c97856d 3630uint64_t helper_efdmul (uint64_t op1, uint64_t op2)
0487d6a8 3631{
1c97856d
AJ
3632 CPU_DoubleU u1, u2;
3633 u1.ll = op1;
3634 u2.ll = op2;
fbd265b6 3635 u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
1c97856d 3636 return u1.ll;
0487d6a8
JM
3637}
3638
1c97856d 3639uint64_t helper_efddiv (uint64_t op1, uint64_t op2)
0487d6a8 3640{
1c97856d
AJ
3641 CPU_DoubleU u1, u2;
3642 u1.ll = op1;
3643 u2.ll = op2;
fbd265b6 3644 u1.d = float64_div(u1.d, u2.d, &env->vec_status);
1c97856d 3645 return u1.ll;
0487d6a8
JM
3646}
3647
1c97856d
AJ
3648/* Double precision floating point helpers */
3649uint32_t helper_efdtstlt (uint64_t op1, uint64_t op2)
0487d6a8 3650{
1c97856d
AJ
3651 CPU_DoubleU u1, u2;
3652 u1.ll = op1;
3653 u2.ll = op2;
fbd265b6 3654 return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3655}
3656
1c97856d 3657uint32_t helper_efdtstgt (uint64_t op1, uint64_t op2)
0487d6a8 3658{
1c97856d
AJ
3659 CPU_DoubleU u1, u2;
3660 u1.ll = op1;
3661 u2.ll = op2;
fbd265b6 3662 return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
0487d6a8
JM
3663}
3664
1c97856d 3665uint32_t helper_efdtsteq (uint64_t op1, uint64_t op2)
0487d6a8 3666{
1c97856d
AJ
3667 CPU_DoubleU u1, u2;
3668 u1.ll = op1;
3669 u2.ll = op2;
fbd265b6 3670 return float64_eq(u1.d, u2.d, &env->vec_status) ? 4 : 0;
0487d6a8
JM
3671}
3672
1c97856d 3673uint32_t helper_efdcmplt (uint64_t op1, uint64_t op2)
0487d6a8 3674{
1c97856d
AJ
3675 /* XXX: TODO: test special values (NaN, infinities, ...) */
3676 return helper_efdtstlt(op1, op2);
0487d6a8
JM
3677}
3678
1c97856d
AJ
3679uint32_t helper_efdcmpgt (uint64_t op1, uint64_t op2)
3680{
3681 /* XXX: TODO: test special values (NaN, infinities, ...) */
3682 return helper_efdtstgt(op1, op2);
3683}
0487d6a8 3684
1c97856d
AJ
3685uint32_t helper_efdcmpeq (uint64_t op1, uint64_t op2)
3686{
3687 /* XXX: TODO: test special values (NaN, infinities, ...) */
3688 return helper_efdtsteq(op1, op2);
3689}
0487d6a8 3690
fdabc366
FB
3691/*****************************************************************************/
3692/* Softmmu support */
3693#if !defined (CONFIG_USER_ONLY)
3694
3695#define MMUSUFFIX _mmu
fdabc366
FB
3696
3697#define SHIFT 0
3698#include "softmmu_template.h"
3699
3700#define SHIFT 1
3701#include "softmmu_template.h"
3702
3703#define SHIFT 2
3704#include "softmmu_template.h"
3705
3706#define SHIFT 3
3707#include "softmmu_template.h"
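/* Each inclusion of softmmu_template.h above instantiates the slow-path
 * load/store handlers for one access size (SHIFT is the log2 of the size:
 * 0 -> 1 byte ... 3 -> 8 bytes), all suffixed with MMUSUFFIX (_mmu). */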
3708
3709/* Try to fill the TLB and raise an exception on error. If retaddr is
3710   NULL, the function was called from C code (i.e. not from generated
3711   code or from helper.c). */
3712/* XXX: fix it to restore all registers */
6ebbf390 3713void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
fdabc366
FB
3714{
3715 TranslationBlock *tb;
3716 CPUState *saved_env;
44f8625d 3717 unsigned long pc;
fdabc366
FB
3718 int ret;
3719
3720 /* XXX: hack to restore env in all cases, even if not called from
3721 generated code */
3722 saved_env = env;
3723 env = cpu_single_env;
6ebbf390 3724 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
76a66253 3725 if (unlikely(ret != 0)) {
fdabc366
FB
3726 if (likely(retaddr)) {
3727 /* now we have a real cpu fault */
44f8625d 3728 pc = (unsigned long)retaddr;
fdabc366
FB
3729 tb = tb_find_pc(pc);
3730 if (likely(tb)) {
3731 /* the PC is inside the translated code, which means we have
3732 a virtual CPU fault */
3733 cpu_restore_state(tb, env, pc, NULL);
76a66253 3734 }
fdabc366 3735 }
e06fcd75 3736 helper_raise_exception_err(env->exception_index, env->error_code);
fdabc366
FB
3737 }
3738 env = saved_env;
9a64fbe4
FB
3739}
3740
74d37793
AJ
3741/* Segment registers load and store */
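/* On 64-bit MMU models the segment registers are presumably backed by SLB
 * entries, which is why helper_load_sr() defers to ppc_load_sr() when
 * POWERPC_MMU_64 is set in the MMU model. */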
3742target_ulong helper_load_sr (target_ulong sr_num)
3743{
f6b868fc
BS
3744#if defined(TARGET_PPC64)
3745 if (env->mmu_model & POWERPC_MMU_64)
3746 return ppc_load_sr(env, sr_num);
3747#endif
74d37793
AJ
3748 return env->sr[sr_num];
3749}
3750
3751void helper_store_sr (target_ulong sr_num, target_ulong val)
3752{
45d827d2 3753 ppc_store_sr(env, sr_num, val);
74d37793
AJ
3754}
3755
3756/* SLB management */
3757#if defined(TARGET_PPC64)
3758target_ulong helper_load_slb (target_ulong slb_nr)
3759{
3760 return ppc_load_slb(env, slb_nr);
3761}
3762
f6b868fc 3763void helper_store_slb (target_ulong rb, target_ulong rs)
74d37793 3764{
f6b868fc 3765 ppc_store_slb(env, rb, rs);
74d37793
AJ
3766}
3767
3768void helper_slbia (void)
3769{
3770 ppc_slb_invalidate_all(env);
3771}
3772
3773void helper_slbie (target_ulong addr)
3774{
3775 ppc_slb_invalidate_one(env, addr);
3776}
3777
3778#endif /* defined(TARGET_PPC64) */
3779
3780/* TLB management */
3781void helper_tlbia (void)
3782{
3783 ppc_tlb_invalidate_all(env);
3784}
3785
3786void helper_tlbie (target_ulong addr)
3787{
3788 ppc_tlb_invalidate_one(env, addr);
3789}
3790
76a66253
JM
3791/* Software-driven TLB management */
3792/* PowerPC 602/603 software TLB load instruction helpers */
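/* The 602/603 take an exception on a TLB miss and reload the TLB in
 * software with tlbld/tlbli; by then the miss address and compare word are
 * in DMISS/DCMP (or IMISS/ICMP for code fetches), the PTE is in RPA, and
 * SRR1 selects the way to replace, which is what do_6xx_tlb() reads below. */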
74d37793 3793static void do_6xx_tlb (target_ulong new_EPN, int is_code)
76a66253
JM
3794{
3795 target_ulong RPN, CMP, EPN;
3796 int way;
d9bce9d9 3797
76a66253
JM
3798 RPN = env->spr[SPR_RPA];
3799 if (is_code) {
3800 CMP = env->spr[SPR_ICMP];
3801 EPN = env->spr[SPR_IMISS];
3802 } else {
3803 CMP = env->spr[SPR_DCMP];
3804 EPN = env->spr[SPR_DMISS];
3805 }
3806 way = (env->spr[SPR_SRR1] >> 17) & 1;
d12d51d5 3807 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
6b542af7 3808 " PTE1 " ADDRX " way %d\n",
0e69805a 3809 __func__, new_EPN, EPN, CMP, RPN, way);
76a66253 3810 /* Store this TLB */
0f3955e2 3811 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
d9bce9d9 3812 way, is_code, CMP, RPN);
76a66253
JM
3813}
3814
74d37793 3815void helper_6xx_tlbd (target_ulong EPN)
0f3955e2 3816{
74d37793 3817 do_6xx_tlb(EPN, 0);
0f3955e2
AJ
3818}
3819
74d37793 3820void helper_6xx_tlbi (target_ulong EPN)
0f3955e2 3821{
74d37793 3822 do_6xx_tlb(EPN, 1);
0f3955e2
AJ
3823}
3824
3825/* PowerPC 74xx software TLB load instruction helpers */
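/* The 74xx variant differs only in where the reload data lives: PTEHI and
 * PTELO hold the compare word and the PTE, and TLBMISS holds both the miss
 * address and, in its low bits, the way to replace. */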
74d37793 3826static void do_74xx_tlb (target_ulong new_EPN, int is_code)
7dbe11ac
JM
3827{
3828 target_ulong RPN, CMP, EPN;
3829 int way;
3830
3831 RPN = env->spr[SPR_PTELO];
3832 CMP = env->spr[SPR_PTEHI];
3833 EPN = env->spr[SPR_TLBMISS] & ~0x3;
3834 way = env->spr[SPR_TLBMISS] & 0x3;
d12d51d5 3835 LOG_SWTLB("%s: EPN " ADDRX " " ADDRX " PTE0 " ADDRX
6b542af7 3836 " PTE1 " ADDRX " way %d\n",
0e69805a 3837 __func__, new_EPN, EPN, CMP, RPN, way);
7dbe11ac 3838 /* Store this TLB */
0f3955e2 3839 ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
7dbe11ac
JM
3840 way, is_code, CMP, RPN);
3841}
3842
74d37793 3843void helper_74xx_tlbd (target_ulong EPN)
0f3955e2 3844{
74d37793 3845 do_74xx_tlb(EPN, 0);
0f3955e2
AJ
3846}
3847
74d37793 3848void helper_74xx_tlbi (target_ulong EPN)
0f3955e2 3849{
74d37793 3850 do_74xx_tlb(EPN, 1);
0f3955e2
AJ
3851}
3852
a11b8151 3853static always_inline target_ulong booke_tlb_to_page_size (int size)
a8dea12f
JM
3854{
3855 return 1024 << (2 * size);
3856}
3857
a11b8151 3858static always_inline int booke_page_size_to_tlb (target_ulong page_size)
a8dea12f
JM
3859{
3860 int size;
3861
3862 switch (page_size) {
3863 case 0x00000400UL:
3864 size = 0x0;
3865 break;
3866 case 0x00001000UL:
3867 size = 0x1;
3868 break;
3869 case 0x00004000UL:
3870 size = 0x2;
3871 break;
3872 case 0x00010000UL:
3873 size = 0x3;
3874 break;
3875 case 0x00040000UL:
3876 size = 0x4;
3877 break;
3878 case 0x00100000UL:
3879 size = 0x5;
3880 break;
3881 case 0x00400000UL:
3882 size = 0x6;
3883 break;
3884 case 0x01000000UL:
3885 size = 0x7;
3886 break;
3887 case 0x04000000UL:
3888 size = 0x8;
3889 break;
3890 case 0x10000000UL:
3891 size = 0x9;
3892 break;
3893 case 0x40000000UL:
3894 size = 0xA;
3895 break;
3896#if defined (TARGET_PPC64)
3897 case 0x000100000000ULL:
3898 size = 0xB;
3899 break;
3900 case 0x000400000000ULL:
3901 size = 0xC;
3902 break;
3903 case 0x001000000000ULL:
3904 size = 0xD;
3905 break;
3906 case 0x004000000000ULL:
3907 size = 0xE;
3908 break;
3909 case 0x010000000000ULL:
3910 size = 0xF;
3911 break;
3912#endif
3913 default:
3914 size = -1;
3915 break;
3916 }
3917
3918 return size;
3919}
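/* The two helpers above implement the BookE TLB size encoding: field value
 * s maps to a page of 1 KiB << (2 * s), i.e. 0 -> 1 KiB, 1 -> 4 KiB, ...,
 * 9 -> 256 MiB (and up to 0xF -> 1 TiB on 64-bit targets);
 * booke_page_size_to_tlb() is the inverse and returns -1 for sizes that
 * have no encoding. */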
3920
76a66253 3921/* Helpers for 4xx TLB management */
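/* Layout used by the helpers below: the EPN word carries the effective page
 * number plus the size field (bits 7..9) and the valid/endian/attribute
 * flags, the RPN word carries the real page number plus the EX (0x200) and
 * WR (0x100) permission bits, and the current PID is exchanged through
 * SPR_40x_PID. */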
74d37793 3922target_ulong helper_4xx_tlbre_lo (target_ulong entry)
76a66253 3923{
a8dea12f 3924 ppcemb_tlb_t *tlb;
74d37793 3925 target_ulong ret;
a8dea12f 3926 int size;
76a66253 3927
74d37793
AJ
3928 entry &= 0x3F;
3929 tlb = &env->tlb[entry].tlbe;
3930 ret = tlb->EPN;
a8dea12f 3931 if (tlb->prot & PAGE_VALID)
74d37793 3932 ret |= 0x400;
a8dea12f
JM
3933 size = booke_page_size_to_tlb(tlb->size);
3934 if (size < 0 || size > 0x7)
3935 size = 1;
74d37793 3936 ret |= size << 7;
a8dea12f 3937 env->spr[SPR_40x_PID] = tlb->PID;
74d37793 3938 return ret;
76a66253
JM
3939}
3940
74d37793 3941target_ulong helper_4xx_tlbre_hi (target_ulong entry)
76a66253 3942{
a8dea12f 3943 ppcemb_tlb_t *tlb;
74d37793 3944 target_ulong ret;
76a66253 3945
74d37793
AJ
3946 entry &= 0x3F;
3947 tlb = &env->tlb[entry].tlbe;
3948 ret = tlb->RPN;
a8dea12f 3949 if (tlb->prot & PAGE_EXEC)
74d37793 3950 ret |= 0x200;
a8dea12f 3951 if (tlb->prot & PAGE_WRITE)
74d37793
AJ
3952 ret |= 0x100;
3953 return ret;
76a66253
JM
3954}
3955
74d37793 3956void helper_4xx_tlbwe_hi (target_ulong entry, target_ulong val)
76a66253 3957{
a8dea12f 3958 ppcemb_tlb_t *tlb;
76a66253
JM
3959 target_ulong page, end;
3960
d12d51d5 3961 LOG_SWTLB("%s entry %d val " ADDRX "\n", __func__, (int)entry, val);
74d37793
AJ
3962 entry &= 0x3F;
3963 tlb = &env->tlb[entry].tlbe;
76a66253
JM
3964 /* Invalidate previous TLB (if it's valid) */
3965 if (tlb->prot & PAGE_VALID) {
3966 end = tlb->EPN + tlb->size;
d12d51d5 3967 LOG_SWTLB("%s: invalidate old TLB %d start " ADDRX
74d37793 3968 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
76a66253
JM
3969 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
3970 tlb_flush_page(env, page);
3971 }
74d37793 3972 tlb->size = booke_tlb_to_page_size((val >> 7) & 0x7);
c294fc58
JM
3973 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
3974 * If this ever occurs, one should use the ppcemb target instead
3975 * of the ppc or ppc64 one
3976 */
74d37793 3977 if ((val & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
71c8b8fd
JM
3978 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
3979 "are not supported (%d)\n",
74d37793 3980 tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
c294fc58 3981 }
74d37793
AJ
3982 tlb->EPN = val & ~(tlb->size - 1);
3983 if (val & 0x40)
76a66253
JM
3984 tlb->prot |= PAGE_VALID;
3985 else
3986 tlb->prot &= ~PAGE_VALID;
74d37793 3987 if (val & 0x20) {
c294fc58
JM
3988 /* XXX: TO BE FIXED */
3989 cpu_abort(env, "Little-endian TLB entries are not supported by now\n");
3990 }
c55e9aef 3991 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
74d37793 3992 tlb->attr = val & 0xFF;
d12d51d5 3993 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
c55e9aef 3994 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
0e69805a 3995 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
c55e9aef
JM
3996 tlb->prot & PAGE_READ ? 'r' : '-',
3997 tlb->prot & PAGE_WRITE ? 'w' : '-',
3998 tlb->prot & PAGE_EXEC ? 'x' : '-',
3999 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
76a66253
JM
4000 /* Invalidate new TLB (if valid) */
4001 if (tlb->prot & PAGE_VALID) {
4002 end = tlb->EPN + tlb->size;
d12d51d5 4003 LOG_SWTLB("%s: invalidate TLB %d start " ADDRX
0e69805a 4004 " end " ADDRX "\n", __func__, (int)entry, tlb->EPN, end);
76a66253
JM
4005 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
4006 tlb_flush_page(env, page);
4007 }
76a66253
JM
4008}
4009
74d37793 4010void helper_4xx_tlbwe_lo (target_ulong entry, target_ulong val)
76a66253 4011{
a8dea12f 4012 ppcemb_tlb_t *tlb;
76a66253 4013
d12d51d5 4014 LOG_SWTLB("%s entry %i val " ADDRX "\n", __func__, (int)entry, val);
74d37793
AJ
4015 entry &= 0x3F;
4016 tlb = &env->tlb[entry].tlbe;
4017 tlb->RPN = val & 0xFFFFFC00;
76a66253 4018 tlb->prot = PAGE_READ;
74d37793 4019 if (val & 0x200)
76a66253 4020 tlb->prot |= PAGE_EXEC;
74d37793 4021 if (val & 0x100)
76a66253 4022 tlb->prot |= PAGE_WRITE;
d12d51d5 4023 LOG_SWTLB("%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
c55e9aef 4024 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
74d37793 4025 (int)entry, tlb->RPN, tlb->EPN, tlb->size,
c55e9aef
JM
4026 tlb->prot & PAGE_READ ? 'r' : '-',
4027 tlb->prot & PAGE_WRITE ? 'w' : '-',
4028 tlb->prot & PAGE_EXEC ? 'x' : '-',
4029 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
76a66253 4030}
5eb7995e 4031
74d37793
AJ
4032target_ulong helper_4xx_tlbsx (target_ulong address)
4033{
4034 return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
4035}
4036
a4bb6c3e 4037/* PowerPC 440 TLB management */
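/* helper_440_tlbwe()/helper_440_tlbre() below operate on one of three TLB
 * words selected by 'word': word 0 carries EPN, page size and the valid
 * bit, word 1 carries the RPN, and word 2 carries the storage attributes
 * plus the six access-permission bits (kept in tlb->prot as two R/W/X
 * groups, one of them shifted left by 4). */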
74d37793 4038void helper_440_tlbwe (uint32_t word, target_ulong entry, target_ulong value)
5eb7995e
JM
4039{
4040 ppcemb_tlb_t *tlb;
a4bb6c3e 4041 target_ulong EPN, RPN, size;
5eb7995e
JM
4042 int do_flush_tlbs;
4043
d12d51d5 4044 LOG_SWTLB("%s word %d entry %d value " ADDRX "\n",
0e69805a 4045 __func__, word, (int)entry, value);
5eb7995e 4046 do_flush_tlbs = 0;
74d37793
AJ
4047 entry &= 0x3F;
4048 tlb = &env->tlb[entry].tlbe;
a4bb6c3e
JM
4049 switch (word) {
4050 default:
4051 /* Just here to please gcc */
4052 case 0:
74d37793 4053 EPN = value & 0xFFFFFC00;
a4bb6c3e 4054 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
5eb7995e 4055 do_flush_tlbs = 1;
a4bb6c3e 4056 tlb->EPN = EPN;
74d37793 4057 size = booke_tlb_to_page_size((value >> 4) & 0xF);
a4bb6c3e
JM
4058 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
4059 do_flush_tlbs = 1;
4060 tlb->size = size;
4061 tlb->attr &= ~0x1;
74d37793
AJ
4062 tlb->attr |= (value >> 8) & 1;
4063 if (value & 0x200) {
a4bb6c3e
JM
4064 tlb->prot |= PAGE_VALID;
4065 } else {
4066 if (tlb->prot & PAGE_VALID) {
4067 tlb->prot &= ~PAGE_VALID;
4068 do_flush_tlbs = 1;
4069 }
5eb7995e 4070 }
a4bb6c3e
JM
4071 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
4072 if (do_flush_tlbs)
4073 tlb_flush(env, 1);
4074 break;
4075 case 1:
74d37793 4076 RPN = value & 0xFFFFFC0F;
a4bb6c3e
JM
4077 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
4078 tlb_flush(env, 1);
4079 tlb->RPN = RPN;
4080 break;
4081 case 2:
74d37793 4082 tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
a4bb6c3e 4083 tlb->prot = tlb->prot & PAGE_VALID;
74d37793 4084 if (value & 0x1)
a4bb6c3e 4085 tlb->prot |= PAGE_READ << 4;
74d37793 4086 if (value & 0x2)
a4bb6c3e 4087 tlb->prot |= PAGE_WRITE << 4;
74d37793 4088 if (value & 0x4)
a4bb6c3e 4089 tlb->prot |= PAGE_EXEC << 4;
74d37793 4090 if (value & 0x8)
a4bb6c3e 4091 tlb->prot |= PAGE_READ;
74d37793 4092 if (value & 0x10)
a4bb6c3e 4093 tlb->prot |= PAGE_WRITE;
74d37793 4094 if (value & 0x20)
a4bb6c3e
JM
4095 tlb->prot |= PAGE_EXEC;
4096 break;
5eb7995e 4097 }
5eb7995e
JM
4098}
4099
74d37793 4100target_ulong helper_440_tlbre (uint32_t word, target_ulong entry)
5eb7995e
JM
4101{
4102 ppcemb_tlb_t *tlb;
74d37793 4103 target_ulong ret;
5eb7995e
JM
4104 int size;
4105
74d37793
AJ
4106 entry &= 0x3F;
4107 tlb = &env->tlb[entry].tlbe;
a4bb6c3e
JM
4108 switch (word) {
4109 default:
4110 /* Just here to please gcc */
4111 case 0:
74d37793 4112 ret = tlb->EPN;
a4bb6c3e
JM
4113 size = booke_page_size_to_tlb(tlb->size);
4114 if (size < 0 || size > 0xF)
4115 size = 1;
74d37793 4116 ret |= size << 4;
a4bb6c3e 4117 if (tlb->attr & 0x1)
74d37793 4118 ret |= 0x100;
a4bb6c3e 4119 if (tlb->prot & PAGE_VALID)
74d37793 4120 ret |= 0x200;
a4bb6c3e
JM
4121 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
4122 env->spr[SPR_440_MMUCR] |= tlb->PID;
4123 break;
4124 case 1:
74d37793 4125 ret = tlb->RPN;
a4bb6c3e
JM
4126 break;
4127 case 2:
74d37793 4128 ret = tlb->attr & ~0x1;
a4bb6c3e 4129 if (tlb->prot & (PAGE_READ << 4))
74d37793 4130 ret |= 0x1;
a4bb6c3e 4131 if (tlb->prot & (PAGE_WRITE << 4))
74d37793 4132 ret |= 0x2;
a4bb6c3e 4133 if (tlb->prot & (PAGE_EXEC << 4))
74d37793 4134 ret |= 0x4;
a4bb6c3e 4135 if (tlb->prot & PAGE_READ)
74d37793 4136 ret |= 0x8;
a4bb6c3e 4137 if (tlb->prot & PAGE_WRITE)
74d37793 4138 ret |= 0x10;
a4bb6c3e 4139 if (tlb->prot & PAGE_EXEC)
74d37793 4140 ret |= 0x20;
a4bb6c3e
JM
4141 break;
4142 }
74d37793 4143 return ret;
5eb7995e 4144}
74d37793
AJ
4145
4146target_ulong helper_440_tlbsx (target_ulong address)
4147{
4148 return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
4149}
4150
76a66253 4151#endif /* !CONFIG_USER_ONLY */