/*
 *  PowerPC integer and vector emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu/host-utils.h"
#include "helper.h"

#include "helper_regs.h"

/*****************************************************************************/
/* Fixed point operations helpers */
#if defined(TARGET_PPC64)

uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
{
    int64_t th;
    uint64_t tl;

    muls64(&tl, (uint64_t *)&th, arg1, arg2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (likely((uint64_t)(th + 1) <= 1)) {
        env->ov = 0;
    } else {
        env->so = env->ov = 1;
    }
    return (int64_t)tl;
}
#endif
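
/*
 * Note on the overflow test in helper_mulldo(): the high doubleword of
 * a signed 64x64 multiply must be 0 or -1 for the product to fit in 64
 * bits.  Casting th + 1 to unsigned maps exactly those two values onto
 * {1, 0}, so the single compare (uint64_t)(th + 1) <= 1 stands in for
 * both equality tests (e.g. th == -1 yields 0, th == 2 yields 3).
 */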
target_ulong helper_cntlzw(target_ulong t)
{
    return clz32(t);
}

#if defined(TARGET_PPC64)
target_ulong helper_cntlzd(target_ulong t)
{
    return clz64(t);
}
#endif
#if defined(TARGET_PPC64)

uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
{
    int i;
    uint64_t ra = 0;

    for (i = 0; i < 8; i++) {
        int index = (rs >> (i*8)) & 0xFF;

        if (index < 64) {
            if (rb & (1ull << (63-index))) {
                ra |= 1 << i;
            }
        }
    }
    return ra;
}

#endif
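
/*
 * bpermd is a bit gather: byte i of rs supplies a big-endian bit index
 * into rb, and bit i of the result is that bit of rb, or 0 when the
 * index is out of range (>= 64) -- hence the index < 64 guard above.
 */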
target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
{
    target_ulong mask = 0xff;
    target_ulong ra = 0;
    int i;

    for (i = 0; i < sizeof(target_ulong); i++) {
        if ((rs & mask) == (rb & mask)) {
            ra |= mask;
        }
        mask <<= 8;
    }
    return ra;
}
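
/*
 * Worked example: cmpb(0x1122, 0x1133) leaves the low byte 0 (0x22 vs
 * 0x33 differ) and fills every other byte position with 0xff, since the
 * remaining bytes (including the zero bytes) all match.
 */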
/* shift right arithmetic helper */
target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int32_t ret;

    if (likely(!(shift & 0x20))) {
        if (likely((uint32_t)shift != 0)) {
            shift &= 0x1f;
            ret = (int32_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
                env->ca = 0;
            } else {
                env->ca = 1;
            }
        } else {
            ret = (int32_t)value;
            env->ca = 0;
        }
    } else {
        ret = (int32_t)value >> 31;
        env->ca = (ret != 0);
    }
    return (target_long)ret;
}
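
/*
 * XER[CA] after sraw is set only when the source was negative and 1
 * bits were shifted out, i.e. exactly when the arithmetic shift is
 * inexact: sraw(-5, 1) gives -3 with CA = 1, while sraw(-4, 1) gives
 * -2 with CA = 0.
 */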
#if defined(TARGET_PPC64)
target_ulong helper_srad(CPUPPCState *env, target_ulong value,
                         target_ulong shift)
{
    int64_t ret;

    if (likely(!(shift & 0x40))) {
        if (likely((uint64_t)shift != 0)) {
            shift &= 0x3f;
            ret = (int64_t)value >> shift;
            if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
                env->ca = 0;
            } else {
                env->ca = 1;
            }
        } else {
            ret = (int64_t)value;
            env->ca = 0;
        }
    } else {
        ret = (int64_t)value >> 63;
        env->ca = (ret != 0);
    }
    return ret;
}
#endif
#if defined(TARGET_PPC64)
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}
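
/*
 * The three masked adds above are a classic SWAR reduction: after the
 * first step each 2-bit field holds its own population count, then each
 * 4-bit field, then each byte.  popcntb stops at the byte stage because
 * the instruction returns a per-byte count; an input byte of 0xff comes
 * out as 0x08.
 */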
target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >> 1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >> 2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}

target_ulong helper_popcntd(target_ulong val)
{
    return ctpop64(val);
}
#else
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}

target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
#endif
/*****************************************************************************/
/* PowerPC 601 specific instructions (POWER bridge) */
target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
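
/*
 * The POWER-bridge divides operate on the 64-bit quantity RA:MQ: the
 * quotient goes to the target register and the remainder back to MQ,
 * which is why tmp is assembled from arg1 and SPR_MQ above.
 */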
target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            env->so = env->ov = 1;
        } else {
            env->ov = 0;
        }
        return tmp;
    }
}

target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}

target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
                          target_ulong arg2)
{
    if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        env->ov = 0;
        env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
        return (int32_t)arg1 / (int32_t)arg2;
    }
}
/*****************************************************************************/
/* 602 specific instructions */
/* mfrom is the most crazy instruction ever seen, imho ! */
/* Real implementation uses a ROM table. Do the same */
/*
 * Extremely decomposed:
 *                      -arg / 256
 * return 256 * log10(10          + 1.0) + 0.5
 */
#if !defined(CONFIG_USER_ONLY)
target_ulong helper_602_mfrom(target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
#endif
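
/*
 * Worked example: arg == 0 gives 256 * log10(2) + 0.5, roughly 77.6,
 * so the first ROM table entry should be 77; the values decay toward 0
 * as arg grows, and anything >= 602 simply returns 0.
 */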
/*****************************************************************************/
/* Altivec extension helpers */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
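
/*
 * VECTOR_FOR_INORDER_I visits vector elements in guest (big-endian)
 * order whatever the host byte order: the ppc_avr_t element arrays are
 * stored in host order, so on little-endian hosts the loop just runs
 * backwards.
 */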
/* Saturating arithmetic helpers.  */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
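
/*
 * Example of the generated converters: cvtshsb() narrows int16_t to
 * int8_t, so cvtshsb(300, &sat) returns 127 and sets *sat, and
 * cvtshub(-5, &sat) clamps to 0.  *sat is only ever set, never cleared,
 * so saturation accumulates across all elements of an operation.
 */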
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}

void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}

void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}

void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
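
/*
 * Carry-out detection without a wider type: a + b overflows 32 bits
 * exactly when b > 0xffffffff - a, i.e. when ~a < b, which is the
 * comparison used above.
 */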
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
#undef VARITH_DO
#undef VARITH

#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b)                                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = func(a->f[i], b->f[i], &env->vec_status);         \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP

#define VARITHFPFMA(suffix, type)                                       \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b, ppc_avr_t *c)                   \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
                                     type, &env->vec_status);           \
        }                                                               \
    }
VARITHFPFMA(maddfp, 0);
VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
#undef VARITHFPFMA
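
/*
 * float32_muladd() computes a*c + b with a single rounding, as vmaddfp
 * requires; vnmsubfp is -(a*c - b), obtained by negating the addend and
 * the final result.
 */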
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,   \
                        ppc_avr_t *b)                                   \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 2:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 4:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element,      \
             unsigned_type)                                            \
    VAVG_DO(avgs##type, signed_element, signed_type)                   \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
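
/*
 * The intermediate x is computed in a type twice as wide as the
 * elements so the +1 (round-half-up) cannot overflow: e.g. vavgub on
 * 255 and 255 forms (255 + 255 + 1) >> 1 = 255 in uint16_t.
 */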
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
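
/*
 * vcfux/vcfsx convert fixed-point words: the result is value / 2**uim,
 * applied exactly with float32_scalbn() rather than a division.
 */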
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ?    \
                               ones : 0x0);                             \
            switch (sizeof(a->element[0])) {                            \
            case 4:                                                     \
                r->u32[i] = result;                                     \
                break;                                                  \
            case 2:                                                     \
                r->u16[i] = result;                                     \
                break;                                                  \
            case 1:                                                     \
                r->u8[i] = result;                                      \
                break;                                                  \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
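
/*
 * For the dot forms, CR field 6 summarizes the comparison: bit 3 is set
 * when the predicate held for every element, bit 1 when it held for
 * none, and both stay clear for a mixed result.
 */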
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i],           \
                                            &env->vec_status);          \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP

static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
                                    ppc_avr_t *a, ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here.  */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}

void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 0);
}

void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                        ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 1);
}
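
/*
 * vcmpbfp is a bounds check rather than an ordinary compare: per
 * element, bit 31 of the result is set when a > b (above the upper
 * bound) and bit 30 when a < -b (below the lower bound), so 0 means
 * "in bounds" and NaN inputs report 0xc0000000.
 */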
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                                                                        \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
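
/*
 * The conversion goes through float64 so that scaling by 2**uim and the
 * 64-bit integer conversion cannot lose bits before the final
 * saturating narrow to 32 bits.
 */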
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
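
/*
 * Both helpers treat the s16 inputs as Q15 fixed point, renormalizing
 * the 32-bit product with the >> 15.  vmhraddshs adds 0x4000 (half an
 * LSB of the shifted result) first, so it rounds to nearest where
 * vmhaddshs truncates.
 */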
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX

void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];

        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}

#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
                                                                        \
        for (i = 0; i < n_elems / 2; i++) {                             \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
                    b->element[n_elems - i - 1];                        \
                result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
                    a->element[n_elems - i - 1];                        \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO

void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}
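
/*
 * The vmsum* helpers all follow this shape: widen and multiply the
 * narrow elements into a temporary array in guest order, then fold each
 * group of partial products (four bytes or two halfwords per word) into
 * the corresponding word of c.
 */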
void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}

void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}

void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] *   \
                    b->mul_element[i * 2 + HI_IDX];                     \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] *   \
                    b->mul_element[i * 2 + LO_IDX];                     \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)         \
    VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
    VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                  ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif

        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
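
/*
 * Each control byte of c picks one of 32 source bytes: bit 4 selects
 * between a (0) and b (1) and the low nibble is the byte number within
 * that register, so a control byte of 0x13 selects byte 3 of b.  The
 * index adjustment keeps guest byte numbering on either host order.
 */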
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];

            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0)  |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}

#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *a, ppc_avr_t *b)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
                                                                        \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
    }
}

#define VRFI(suffix, rounding)                                  \
    void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,    \
                             ppc_avr_t *b)                      \
    {                                                           \
        int i;                                                  \
        float_status s = env->vec_status;                       \
                                                                \
        set_float_rounding_mode(rounding, &s);                  \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                \
            r->f[i] = float32_round_to_int(b->f[i], &s);        \
        }                                                       \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
#define VROTATE(suffix, element)                                        \
    void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
            r->element[i] = (a->element[i] << shift) |                  \
                (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
        }                                                               \
    }
VROTATE(b, u8)
VROTATE(h, u16)
VROTATE(w, u32)
#undef VROTATE
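
/*
 * The mask expression derives the legal shift-count mask from the
 * element width: sizeof of 1, 2 or 4 gives (sizeof >> 1) of 0, 1 or 2,
 * hence masks of (1 << 3) - 1 = 7, (1 << 4) - 1 = 15 and
 * (1 << 5) - 1 = 31 bits respectively.
 */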
void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        float32 t = float32_sqrt(b->f[i], &env->vec_status);

        r->f[i] = float32_div(float32_one, t, &env->vec_status);
    }
}

void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                 ppc_avr_t *c)
{
    r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
    r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
}

void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_exp2(b->f[i], &env->vec_status);
    }
}

void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_log2(b->f[i], &env->vec_status);
    }
}
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do.  */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                memcpy(r, a, sizeof(ppc_avr_t));                        \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                                                                        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                                                                        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, 1)
VSHIFT(r, 0)
#undef VSHIFT
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL

void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
{
    int sh = shift & 0xf;
    int i;
    ppc_avr_t result;

#if defined(HOST_WORDS_BIGENDIAN)
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = sh + i;
        if (index > 0xf) {
            result.u8[i] = b->u8[index - 0x10];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
#else
    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        int index = (16 - sh) + i;
        if (index > 0xf) {
            result.u8[i] = a->u8[index - 0x10];
        } else {
            result.u8[i] = b->u8[index];
        }
    }
#endif
    *r = result;
}

void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16-sh], 0, sh);
#else
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#endif
}
/* Experimental testing shows that hardware masks the immediate.  */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
#define SPLAT_ELEMENT(element)                                  \
    (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
#endif
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED

#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)   \
    {                                                           \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
                                                                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
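
/*
 * The (int8_t)(splat << 3) >> 3 trick sign-extends the 5-bit SIMM
 * field: splat = 0x1f becomes (int8_t)0xf8 >> 3 = -1, while
 * splat = 0x0f stays 15.
 */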
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR

void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;

#if defined(HOST_WORDS_BIGENDIAN)
    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
    memset(&r->u8[0], 0, sh);
#else
    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
    memset(&r->u8[16 - sh], 0, sh);
#endif
}

void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = a->u32[i] >= b->u32[i];
    }
}
void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int64_t t;
    int i, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = ARRAY_SIZE(r->s32)-1;
#else
    upper = 0;
#endif
    t = (int64_t)b->s32[upper];
    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        t += a->s32[i];
        result.s32[i] = 0;
    }
    result.s32[upper] = cvtsdsw(t, &sat);
    *r = result;

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;
#else
    upper = 0;
#endif
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper + i * 2];

        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2 * i + j];
        }
        result.s32[upper + i * 2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
            t += a->s8[4 * i + j];
        }
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
        int64_t t = (int64_t)b->s32[i];

        t += a->s16[2 * i] + a->s16[2 * i + 1];
        r->s32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}

void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        uint64_t t = (uint64_t)b->u32[i];

        for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
            t += a->u8[4 * i + j];
        }
        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
#if defined(HOST_WORDS_BIGENDIAN)
#define UPKHI 1
#define UPKLO 0
#else
#define UPKHI 0
#define UPKLO 1
#endif
#define VUPKPX(suffix, hi)                                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u32); i++) {                      \
            uint16_t e = b->u16[hi ? i : i+4];                          \
            uint8_t a = (e >> 15) ? 0xff : 0;                           \
            uint8_t r = (e >> 10) & 0x1f;                               \
            uint8_t g = (e >> 5) & 0x1f;                                \
            uint8_t b = e & 0x1f;                                       \
                                                                        \
            result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b;       \
        }                                                               \
        *r = result;                                                    \
    }
VUPKPX(lpx, UPKLO)
VUPKPX(hpx, UPKHI)

#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO

#undef VECTOR_FOR_INORDER_I
#undef HI_IDX
#undef LO_IDX

/*****************************************************************************/
/* SPE extension helpers */
/* Use a table to make this quicker */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

static inline uint8_t byte_reverse(uint8_t val)
{
    return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
}

static inline uint32_t word_reverse(uint32_t val)
{
    return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
        (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
}
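
/*
 * hbrev[x] is the 4-bit mirror image of x, so byte_reverse() mirrors a
 * whole byte by swapping the reversed nibbles (e.g. 0x01 -> 0x80), and
 * word_reverse() mirrors all 32 bits for the bit-reversed increment
 * performed by helper_brinc() below.
 */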
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

uint32_t helper_cntlsw32(uint32_t val)
{
    if (val & 0x80000000) {
        return clz32(~val);
    } else {
        return clz32(val);
    }
}

uint32_t helper_cntlzw32(uint32_t val)
{
    return clz32(val);
}
/* 440 specific */
target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
                          target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
 done:
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}