/* target-ppc/int_helper.c (QEMU) — PowerPC integer and Altivec vector
 * emulation helpers.  (Altivec 2.07: Add/Subtract Unsigned Doubleword
 * Modulo series.) */
1 /*
2 * PowerPC integer and vector emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "cpu.h"
20 #include "qemu/host-utils.h"
21 #include "helper.h"
22
23 #include "helper_regs.h"
24 /*****************************************************************************/
25 /* Fixed point operations helpers */
26 #if defined(TARGET_PPC64)
27
28 uint64_t helper_mulldo(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
29 {
30 int64_t th;
31 uint64_t tl;
32
33 muls64(&tl, (uint64_t *)&th, arg1, arg2);
34 /* If th != 0 && th != -1, then we had an overflow */
35 if (likely((uint64_t)(th + 1) <= 1)) {
36 env->ov = 0;
37 } else {
38 env->so = env->ov = 1;
39 }
40 return (int64_t)tl;
41 }
42 #endif
43
44 target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
45 uint32_t oe)
46 {
47 uint64_t rt = 0;
48 int overflow = 0;
49
50 uint64_t dividend = (uint64_t)ra << 32;
51 uint64_t divisor = (uint32_t)rb;
52
53 if (unlikely(divisor == 0)) {
54 overflow = 1;
55 } else {
56 rt = dividend / divisor;
57 overflow = rt > UINT32_MAX;
58 }
59
60 if (unlikely(overflow)) {
61 rt = 0; /* Undefined */
62 }
63
64 if (oe) {
65 if (unlikely(overflow)) {
66 env->so = env->ov = 1;
67 } else {
68 env->ov = 0;
69 }
70 }
71
72 return (target_ulong)rt;
73 }
74
75 target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
76 uint32_t oe)
77 {
78 int64_t rt = 0;
79 int overflow = 0;
80
81 int64_t dividend = (int64_t)ra << 32;
82 int64_t divisor = (int64_t)((int32_t)rb);
83
84 if (unlikely((divisor == 0) ||
85 ((divisor == -1ull) && (dividend == INT64_MIN)))) {
86 overflow = 1;
87 } else {
88 rt = dividend / divisor;
89 overflow = rt != (int32_t)rt;
90 }
91
92 if (unlikely(overflow)) {
93 rt = 0; /* Undefined */
94 }
95
96 if (oe) {
97 if (unlikely(overflow)) {
98 env->so = env->ov = 1;
99 } else {
100 env->ov = 0;
101 }
102 }
103
104 return (target_ulong)rt;
105 }
106
107 #if defined(TARGET_PPC64)
108
109 uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
110 {
111 uint64_t rt = 0;
112 int overflow = 0;
113
114 overflow = divu128(&rt, &ra, rb);
115
116 if (unlikely(overflow)) {
117 rt = 0; /* Undefined */
118 }
119
120 if (oe) {
121 if (unlikely(overflow)) {
122 env->so = env->ov = 1;
123 } else {
124 env->ov = 0;
125 }
126 }
127
128 return rt;
129 }
130
131 uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
132 {
133 int64_t rt = 0;
134 int64_t ra = (int64_t)rau;
135 int64_t rb = (int64_t)rbu;
136 int overflow = divs128(&rt, &ra, rb);
137
138 if (unlikely(overflow)) {
139 rt = 0; /* Undefined */
140 }
141
142 if (oe) {
143
144 if (unlikely(overflow)) {
145 env->so = env->ov = 1;
146 } else {
147 env->ov = 0;
148 }
149 }
150
151 return rt;
152 }
153
154 #endif
155
156
/* cntlzw: count leading zeros of the low 32 bits of t. */
target_ulong helper_cntlzw(target_ulong t)
{
    return clz32(t);
}
161
162 #if defined(TARGET_PPC64)
/* cntlzd: count leading zeros of the full 64-bit t. */
target_ulong helper_cntlzd(target_ulong t)
{
    return clz64(t);
}
167 #endif
168
169 #if defined(TARGET_PPC64)
170
/* bpermd: Bit Permute Doubleword.  Each byte i of rs names a bit of rb
 * (IBM numbering: bit k is 1ull << (63 - k)); bit i of the result is set
 * when that rb bit is 1.  Byte values >= 64 select no bit (result bit 0).
 */
uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
{
    uint64_t ra = 0;
    int i;

    for (i = 0; i < 8; i++) {
        int index = (rs >> (i * 8)) & 0xFF;

        if (index < 64 && (rb & (1ull << (63 - index)))) {
            ra |= 1 << i;
        }
    }
    return ra;
}
186
187 #endif
188
189 target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
190 {
191 target_ulong mask = 0xff;
192 target_ulong ra = 0;
193 int i;
194
195 for (i = 0; i < sizeof(target_ulong); i++) {
196 if ((rs & mask) == (rb & mask)) {
197 ra |= mask;
198 }
199 mask <<= 8;
200 }
201 return ra;
202 }
203
204 /* shift right arithmetic helper */
205 target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
206 target_ulong shift)
207 {
208 int32_t ret;
209
210 if (likely(!(shift & 0x20))) {
211 if (likely((uint32_t)shift != 0)) {
212 shift &= 0x1f;
213 ret = (int32_t)value >> shift;
214 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
215 env->ca = 0;
216 } else {
217 env->ca = 1;
218 }
219 } else {
220 ret = (int32_t)value;
221 env->ca = 0;
222 }
223 } else {
224 ret = (int32_t)value >> 31;
225 env->ca = (ret != 0);
226 }
227 return (target_long)ret;
228 }
229
230 #if defined(TARGET_PPC64)
231 target_ulong helper_srad(CPUPPCState *env, target_ulong value,
232 target_ulong shift)
233 {
234 int64_t ret;
235
236 if (likely(!(shift & 0x40))) {
237 if (likely((uint64_t)shift != 0)) {
238 shift &= 0x3f;
239 ret = (int64_t)value >> shift;
240 if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
241 env->ca = 0;
242 } else {
243 env->ca = 1;
244 }
245 } else {
246 ret = (int64_t)value;
247 env->ca = 0;
248 }
249 } else {
250 ret = (int64_t)value >> 63;
251 env->ca = (ret != 0);
252 }
253 return ret;
254 }
255 #endif
256
257 #if defined(TARGET_PPC64)
/* popcntb (64-bit): SWAR population count stopped at byte granularity --
 * after these three steps each byte of val holds the number of 1-bits
 * that byte originally contained. */
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    return val;
}
268
/* popcntw (64-bit): SWAR population count stopped at word granularity --
 * each 32-bit half of val ends up holding its own bit count. */
target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x5555555555555555ULL) + ((val >>  1) &
                                           0x5555555555555555ULL);
    val = (val & 0x3333333333333333ULL) + ((val >>  2) &
                                           0x3333333333333333ULL);
    val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >>  4) &
                                           0x0f0f0f0f0f0f0f0fULL);
    val = (val & 0x00ff00ff00ff00ffULL) + ((val >>  8) &
                                           0x00ff00ff00ff00ffULL);
    val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
                                           0x0000ffff0000ffffULL);
    return val;
}
283
/* popcntd: full 64-bit population count via the host-utils helper. */
target_ulong helper_popcntd(target_ulong val)
{
    return ctpop64(val);
}
288 #else
/* popcntb (32-bit target): per-byte SWAR population count. */
target_ulong helper_popcntb(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    return val;
}
296
/* popcntw (32-bit target): full-word SWAR population count. */
target_ulong helper_popcntw(target_ulong val)
{
    val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
    val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
    val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
    val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
    val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
    return val;
}
306 #endif
307
308 /*****************************************************************************/
309 /* PowerPC 601 specific instructions (POWER bridge) */
/* POWER (601 bridge) div: divides the 64-bit value arg1:MQ by the signed
 * 32-bit arg2; the quotient is returned and the remainder goes to MQ.
 * Divide-by-zero or (low word == INT32_MIN) / -1 returns INT32_MIN with
 * MQ cleared. */
target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): the remainder uses an unsigned modulo while the
         * quotient divides by the sign-extended arg2 -- presumably this
         * mirrors 601 bridge behaviour; confirm against hardware docs. */
        env->spr[SPR_MQ] = tmp % arg2;
        return tmp / (int32_t)arg2;
    }
}
323
/* POWER (601 bridge) divo: as helper_div, but also maintains OV/SO --
 * set on the error cases and whenever the quotient does not fit in
 * 32 bits, cleared otherwise. */
target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
                         target_ulong arg2)
{
    uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];

    if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
        (int32_t)arg2 == 0) {
        env->so = env->ov = 1;
        env->spr[SPR_MQ] = 0;
        return INT32_MIN;
    } else {
        /* NOTE(review): unsigned modulo vs signed divide, as in
         * helper_div -- kept as-is; confirm against hardware docs. */
        env->spr[SPR_MQ] = tmp % arg2;
        tmp /= (int32_t)arg2;
        if ((int32_t)tmp != tmp) {
            /* Quotient does not fit in 32 bits. */
            env->so = env->ov = 1;
        } else {
            env->ov = 0;
        }
        return tmp;
    }
}
345
346 target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
347 target_ulong arg2)
348 {
349 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
350 (int32_t)arg2 == 0) {
351 env->spr[SPR_MQ] = 0;
352 return INT32_MIN;
353 } else {
354 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
355 return (int32_t)arg1 / (int32_t)arg2;
356 }
357 }
358
359 target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
360 target_ulong arg2)
361 {
362 if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
363 (int32_t)arg2 == 0) {
364 env->so = env->ov = 1;
365 env->spr[SPR_MQ] = 0;
366 return INT32_MIN;
367 } else {
368 env->ov = 0;
369 env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
370 return (int32_t)arg1 / (int32_t)arg2;
371 }
372 }
373
374 /*****************************************************************************/
375 /* 602 specific instructions */
376 /* mfrom is the most crazy instruction ever seen, imho ! */
377 /* Real implementation uses a ROM table. Do the same */
378 /* Extremely decomposed:
379 * -arg / 256
380 * return 256 * log10(10 + 1.0) + 0.5
381 */
382 #if !defined(CONFIG_USER_ONLY)
/* 602 mfrom: table lookup emulating the hardware ROM (see the formula in
 * the comment above); out-of-range arguments return 0.  The #include
 * pulls the generated table into function scope and must stay here. */
target_ulong helper_602_mfrom(target_ulong arg)
{
    if (likely(arg < 602)) {
#include "mfrom_table.c"
        return mfrom_ROM_table[arg];
    } else {
        return 0;
    }
}
392 #endif
393
394 /*****************************************************************************/
395 /* Altivec extension helpers */
/* HI_IDX/LO_IDX: index of the architecturally most/least significant
 * u64 half of a ppc_avr_t on this host. */
#if defined(HOST_WORDS_BIGENDIAN)
#define HI_IDX 0
#define LO_IDX 1
#else
#define HI_IDX 1
#define LO_IDX 0
#endif

/* Iterate over the elements of r in architectural (big-endian) order,
 * whatever the host byte order is. */
#if defined(HOST_WORDS_BIGENDIAN)
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = 0; index < ARRAY_SIZE(r->element); index++)
#else
#define VECTOR_FOR_INORDER_I(index, element)                    \
    for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
#endif
411
412 /* Saturating arithmetic helpers. */
/* Saturating arithmetic helpers: cvt<from><to>(x, &sat) narrows x to the
 * destination type, clamping to [min, max] and setting *sat when a clamp
 * occurred (*sat is never cleared, so it accumulates across calls). */
#define SATCVT(from, to, from_type, to_type, min, max)          \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x < (from_type)min) {                               \
            r = min;                                            \
            *sat = 1;                                           \
        } else if (x > (from_type)max) {                        \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
/* Unsigned-source variant: only the upper bound can saturate. */
#define SATCVTU(from, to, from_type, to_type, min, max)         \
    static inline to_type cvt##from##to(from_type x, int *sat)  \
    {                                                           \
        to_type r;                                              \
                                                                \
        if (x > (from_type)max) {                               \
            r = max;                                            \
            *sat = 1;                                           \
        } else {                                                \
            r = x;                                              \
        }                                                       \
        return r;                                               \
    }
SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)

SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
#undef SATCVT
#undef SATCVTU
454
/* lvsl: build the permute control vector {sh, sh+1, ..., sh+15}
 * (sh = low 4 bits of the address), in architectural element order. */
void helper_lvsl(ppc_avr_t *r, target_ulong sh)
{
    int i, j = (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}
463
/* lvsr: build the permute control vector {16-sh, 17-sh, ...}
 * (sh = low 4 bits of the address), in architectural element order. */
void helper_lvsr(ppc_avr_t *r, target_ulong sh)
{
    int i, j = 0x10 - (sh & 0xf);

    VECTOR_FOR_INORDER_I(i, u8) {
        r->u8[i] = j++;
    }
}
472
/* mtvscr: load VSCR from the architecturally last word of r and make the
 * vector softfloat status honour the NJ (flush-to-zero) bit. */
void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
{
#if defined(HOST_WORDS_BIGENDIAN)
    env->vscr = r->u32[3];
#else
    env->vscr = r->u32[0];
#endif
    set_flush_to_zero(vscr_nj, &env->vec_status);
}
482
/* vaddcuw: per-word carry-out of a + b.  ~a < b is exactly the condition
 * a + b > UINT32_MAX, i.e. the unsigned addition would carry. */
void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
        r->u32[i] = ~a->u32[i] < b->u32[i];
    }
}
491
/* Element-wise modulo add/subtract for each element width, including the
 * 2.07 doubleword forms (vaddudm/vsubudm). */
#define VARITH_DO(name, op, element)                                    \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = a->element[i] op b->element[i];             \
        }                                                               \
    }
#define VARITH(suffix, element)                 \
    VARITH_DO(add##suffix, +, element)          \
    VARITH_DO(sub##suffix, -, element)
VARITH(ubm, u8)
VARITH(uhm, u16)
VARITH(uwm, u32)
VARITH(udm, u64)
#undef VARITH_DO
#undef VARITH
510
/* Element-wise single-precision float operations using the vector
 * softfloat status (so VSCR[NJ] flush-to-zero is honoured). */
#define VARITHFP(suffix, func)                                          \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b)                                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = func(a->f[i], b->f[i], &env->vec_status);         \
        }                                                               \
    }
VARITHFP(addfp, float32_add)
VARITHFP(subfp, float32_sub)
VARITHFP(minfp, float32_min)
VARITHFP(maxfp, float32_max)
#undef VARITHFP
526
/* Fused multiply-add forms: vmaddfp computes a*c + b; vnmsubfp computes
 * -(a*c - b) via the softfloat muladd negation flags. */
#define VARITHFPFMA(suffix, type)                                       \
    void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
                          ppc_avr_t *b, ppc_avr_t *c)                   \
    {                                                                   \
        int i;                                                          \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i],         \
                                     type, &env->vec_status);           \
        }                                                               \
    }
VARITHFPFMA(maddfp, 0);
VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
#undef VARITHFPFMA
540
/* Saturating element-wise add/subtract: compute in a wider type
 * (optype), then narrow with the matching cvt* helper, which also
 * records saturation; VSCR[SAT] is set if any element saturated. */
#define VARITHSAT_CASE(type, op, cvt, element)                          \
    {                                                                   \
        type result = (type)a->element[i] op (type)b->element[i];       \
        r->element[i] = cvt(result, &sat);                              \
    }

#define VARITHSAT_DO(name, op, optype, cvt, element)                    \
    void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,   \
                        ppc_avr_t *b)                                   \
    {                                                                   \
        int sat = 0;                                                    \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            switch (sizeof(r->element[0])) {                            \
            case 1:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 2:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            case 4:                                                     \
                VARITHSAT_CASE(optype, op, cvt, element);               \
                break;                                                  \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define VARITHSAT_SIGNED(suffix, element, optype, cvt)          \
    VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt)        \
    VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element)      \
    VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
#undef VARITHSAT_CASE
#undef VARITHSAT_DO
#undef VARITHSAT_SIGNED
#undef VARITHSAT_UNSIGNED
587
/* Element-wise average with rounding: (a + b + 1) >> 1, computed in a
 * wider type so the sum cannot overflow. */
#define VAVG_DO(name, element, etype)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            etype x = (etype)a->element[i] + (etype)b->element[i] + 1;  \
            r->element[i] = x >> 1;                                     \
        }                                                               \
    }

#define VAVG(type, signed_element, signed_type, unsigned_element,       \
             unsigned_type)                                             \
    VAVG_DO(avgs##type, signed_element, signed_type)                    \
    VAVG_DO(avgu##type, unsigned_element, unsigned_type)
VAVG(b, s8, int16_t, u8, uint16_t)
VAVG(h, s16, int32_t, u16, uint32_t)
VAVG(w, s32, int64_t, u32, uint64_t)
#undef VAVG_DO
#undef VAVG
608
/* vcfux/vcfsx: convert (un)signed 32-bit integers to float and divide by
 * 2^uim via scalbn. */
#define VCF(suffix, cvt, element)                                       \
    void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            float32 t = cvt(b->element[i], &env->vec_status);           \
            r->f[i] = float32_scalbn(t, -uim, &env->vec_status);        \
        }                                                               \
    }
VCF(ux, uint32_to_float32, u32)
VCF(sx, int32_to_float32, s32)
#undef VCF
623
/* Element-wise integer compares: each result element is all-ones when
 * the predicate holds, zero otherwise.  The "_dot" (record) forms also
 * set CR6: bit 3 = all elements true, bit 1 = all elements false. */
#define VCMP_DO(suffix, compare, element, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            uint32_t result = (a->element[i] compare b->element[i] ?    \
                               ones : 0x0);                             \
            switch (sizeof(a->element[0])) {                            \
            case 4:                                                     \
                r->u32[i] = result;                                     \
                break;                                                  \
            case 2:                                                     \
                r->u16[i] = result;                                     \
                break;                                                  \
            case 1:                                                     \
                r->u8[i] = result;                                      \
                break;                                                  \
            }                                                           \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMP(suffix, compare, element)          \
    VCMP_DO(suffix, compare, element, 0)        \
    VCMP_DO(suffix##_dot, compare, element, 1)
VCMP(equb, ==, u8)
VCMP(equh, ==, u16)
VCMP(equw, ==, u32)
VCMP(gtub, >, u8)
VCMP(gtuh, >, u16)
VCMP(gtuw, >, u32)
VCMP(gtsb, >, s8)
VCMP(gtsh, >, s16)
VCMP(gtsw, >, s32)
#undef VCMP_DO
#undef VCMP
668
/* Element-wise float compares.  Unordered (NaN) comparisons yield false;
 * gefp uses "rel != less" so it is true for equal or greater.  The
 * "_dot" forms set CR6 as for the integer compares. */
#define VCMPFP_DO(suffix, compare, order, record)                       \
    void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r,            \
                             ppc_avr_t *a, ppc_avr_t *b)                \
    {                                                                   \
        uint32_t ones = (uint32_t)-1;                                   \
        uint32_t all = ones;                                            \
        uint32_t none = 0;                                              \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            uint32_t result;                                            \
            int rel = float32_compare_quiet(a->f[i], b->f[i],           \
                                            &env->vec_status);          \
            if (rel == float_relation_unordered) {                      \
                result = 0;                                             \
            } else if (rel compare order) {                             \
                result = ones;                                          \
            } else {                                                    \
                result = 0;                                             \
            }                                                           \
            r->u32[i] = result;                                         \
            all &= result;                                              \
            none |= result;                                             \
        }                                                               \
        if (record) {                                                   \
            env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1);       \
        }                                                               \
    }
#define VCMPFP(suffix, compare, order)          \
    VCMPFP_DO(suffix, compare, order, 0)        \
    VCMPFP_DO(suffix##_dot, compare, order, 1)
VCMPFP(eqfp, ==, float_relation_equal)
VCMPFP(gefp, !=, float_relation_less)
VCMPFP(gtfp, ==, float_relation_greater)
#undef VCMPFP_DO
#undef VCMPFP
705
/* Common body for vcmpbfp[.]: per element, bit 31 is set when a > b and
 * bit 30 when a < -b ("out of bounds" on either side); NaN operands give
 * 0xc0000000.  With record, CR6 bit 1 is set when every element was in
 * bounds. */
static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
                                    ppc_avr_t *a, ppc_avr_t *b, int record)
{
    int i;
    int all_in = 0;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
        if (le_rel == float_relation_unordered) {
            r->u32[i] = 0xc0000000;
            /* ALL_IN does not need to be updated here. */
        } else {
            float32 bneg = float32_chs(b->f[i]);
            int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
            int le = le_rel != float_relation_greater;
            int ge = ge_rel != float_relation_less;

            r->u32[i] = ((!le) << 31) | ((!ge) << 30);
            all_in |= (!le | !ge);
        }
    }
    if (record) {
        env->crf[6] = (all_in == 0) << 1;
    }
}
731
/* vcmpbfp: bounds compare without CR6 update. */
void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 0);
}
736
/* vcmpbfp.: bounds compare, recording the all-in-bounds flag in CR6. */
void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                        ppc_avr_t *b)
{
    vcmpbfp_internal(env, r, a, b, 1);
}
742
/* vctuxs/vctsxs: float -> fixed-point with 2^uim scaling, rounding
 * toward zero, saturating via the cvt helpers (which set VSCR[SAT]);
 * NaN inputs convert to 0.  The widening to float64 keeps the scaled
 * value exact before the integer conversion. */
#define VCT(suffix, satcvt, element)                                    \
    void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *b, uint32_t uim)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        float_status s = env->vec_status;                               \
                                                                        \
        set_float_rounding_mode(float_round_to_zero, &s);               \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                        \
            if (float32_is_any_nan(b->f[i])) {                          \
                r->element[i] = 0;                                      \
            } else {                                                    \
                float64 t = float32_to_float64(b->f[i], &s);            \
                int64_t j;                                              \
                                                                        \
                t = float64_scalbn(t, uim, &s);                         \
                j = float64_to_int64(t, &s);                            \
                r->element[i] = satcvt(j, &sat);                        \
            }                                                           \
        }                                                               \
        if (sat) {                                                      \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
771
/* vmhaddshs: (a * b) >> 15 plus c, per halfword, saturated to int16;
 * VSCR[SAT] is set if any element saturated. */
void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                      ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);

        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
789
/* vmhraddshs: as vmhaddshs but with rounding -- 0x4000 is added to the
 * product before the >> 15. */
void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                       ppc_avr_t *b, ppc_avr_t *c)
{
    int sat = 0;
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
        int32_t t = (int32_t)c->s16[i] + (prod >> 15);
        r->s16[i] = cvtswsh(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
806
/* Element-wise min/max: min uses ">" (keep b when a > b), max uses "<". */
#define VMINMAX_DO(name, compare, element)                              \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            if (a->element[i] compare b->element[i]) {                  \
                r->element[i] = b->element[i];                          \
            } else {                                                    \
                r->element[i] = a->element[i];                          \
            }                                                           \
        }                                                               \
    }
#define VMINMAX(suffix, element)                \
    VMINMAX_DO(min##suffix, >, element)         \
    VMINMAX_DO(max##suffix, <, element)
VMINMAX(sb, s8)
VMINMAX(sh, s16)
VMINMAX(sw, s32)
VMINMAX(ub, u8)
VMINMAX(uh, u16)
VMINMAX(uw, u32)
#undef VMINMAX_DO
#undef VMINMAX
831
/* vmladduhm: (a * b + c) modulo 2^16, per halfword. */
void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        int32_t prod = a->s16[i] * b->s16[i];
        r->s16[i] = (int16_t) (prod + c->s16[i]);
    }
}
841
/* Vector merge high/low: interleave elements from the high (or low)
 * halves of a and b.  NOTE(review): mrgl* is instantiated with MRGHI and
 * mrgh* with MRGLO -- the host-endian index swap makes the pairing come
 * out right, but it reads inverted; verify before touching. */
#define VMRG_DO(name, element, highp)                                   \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        ppc_avr_t result;                                               \
        int i;                                                          \
        size_t n_elems = ARRAY_SIZE(r->element);                        \
                                                                        \
        for (i = 0; i < n_elems / 2; i++) {                             \
            if (highp) {                                                \
                result.element[i*2+HI_IDX] = a->element[i];             \
                result.element[i*2+LO_IDX] = b->element[i];             \
            } else {                                                    \
                result.element[n_elems - i * 2 - (1 + HI_IDX)] =        \
                    b->element[n_elems - i - 1];                        \
                result.element[n_elems - i * 2 - (1 + LO_IDX)] =        \
                    a->element[n_elems - i - 1];                        \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
#if defined(HOST_WORDS_BIGENDIAN)
#define MRGHI 0
#define MRGLO 1
#else
#define MRGHI 1
#define MRGLO 0
#endif
#define VMRG(suffix, element)                   \
    VMRG_DO(mrgl##suffix, element, MRGHI)       \
    VMRG_DO(mrgh##suffix, element, MRGLO)
VMRG(b, u8)
VMRG(h, u16)
VMRG(w, u32)
#undef VMRG_DO
#undef VMRG
#undef MRGHI
#undef MRGLO
879
/* vmsummbm: multiply signed bytes of a by unsigned bytes of b, then sum
 * each group of four products into the matching word of c. */
void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
        prod[i] = (int32_t)a->s8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}
895
/* vmsumshm: signed halfword multiply, pairs of products summed into the
 * matching word of c, modulo arithmetic. */
void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}
910
/* vmsumshs: as vmsumshm but the per-word accumulation is done in 64 bits
 * and saturated to int32, setting VSCR[SAT] on saturation. */
void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    int32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
        prod[i] = (int32_t)a->s16[i] * b->s16[i];
    }

    VECTOR_FOR_INORDER_I(i, s32) {
        int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtsdsw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
932
/* vmsumubm: unsigned byte multiply, groups of four products summed into
 * the matching word of c, modulo arithmetic. */
void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint16_t prod[16];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
        prod[i] = a->u8[i] * b->u8[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
            prod[4 * i + 2] + prod[4 * i + 3];
    }
}
948
/* vmsumuhm: unsigned halfword multiply, pairs of products summed into
 * the matching word of c, modulo arithmetic. */
void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    VECTOR_FOR_INORDER_I(i, u32) {
        r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
    }
}
963
/* vmsumuhs: as vmsumuhm but the per-word accumulation is done in 64 bits
 * and saturated to uint32, setting VSCR[SAT] on saturation. */
void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
                     ppc_avr_t *b, ppc_avr_t *c)
{
    uint32_t prod[8];
    int i;
    int sat = 0;

    for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
        prod[i] = a->u16[i] * b->u16[i];
    }

    /* s32 and u32 have the same element count; this iterates the words. */
    VECTOR_FOR_INORDER_I(i, s32) {
        uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];

        r->u32[i] = cvtuduw(t, &sat);
    }

    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
985
/* Even/odd multiplies: mule* multiplies the architecturally even
 * elements, mulo* the odd ones, producing double-width products
 * (the HI_IDX/LO_IDX offsets handle host byte order). */
#define VMUL_DO(name, mul_element, prod_element, evenp)                 \
    void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)       \
    {                                                                   \
        int i;                                                          \
                                                                        \
        VECTOR_FOR_INORDER_I(i, prod_element) {                         \
            if (evenp) {                                                \
                r->prod_element[i] = a->mul_element[i * 2 + HI_IDX] *   \
                    b->mul_element[i * 2 + HI_IDX];                     \
            } else {                                                    \
                r->prod_element[i] = a->mul_element[i * 2 + LO_IDX] *   \
                    b->mul_element[i * 2 + LO_IDX];                     \
            }                                                           \
        }                                                               \
    }
#define VMUL(suffix, mul_element, prod_element)         \
    VMUL_DO(mule##suffix, mul_element, prod_element, 1) \
    VMUL_DO(mulo##suffix, mul_element, prod_element, 0)
VMUL(sb, s8, s16)
VMUL(sh, s16, s32)
VMUL(ub, u8, u16)
VMUL(uh, u16, u32)
#undef VMUL_DO
#undef VMUL
1010
/* vperm: each byte of c selects one byte from the 32-byte concatenation
 * a:b -- bit 4 picks the source vector, the low 4 bits the byte within
 * it (index mirrored on little-endian hosts). */
void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
                  ppc_avr_t *c)
{
    ppc_avr_t result;
    int i;

    VECTOR_FOR_INORDER_I(i, u8) {
        int s = c->u8[i] & 0x1f;
#if defined(HOST_WORDS_BIGENDIAN)
        int index = s & 0xf;
#else
        int index = 15 - (s & 0xf);
#endif

        if (s & 0x10) {
            result.u8[i] = b->u8[index];
        } else {
            result.u8[i] = a->u8[index];
        }
    }
    *r = result;
}
1033
/* PKBIG: 1 when the host lays a:b out in architectural order. */
#if defined(HOST_WORDS_BIGENDIAN)
#define PKBIG 1
#else
#define PKBIG 0
#endif
/* vpkpx: pack each 32-bit pixel of a:b into a 1.5.5.5 halfword -- bit 15
 * from source bit 24, then the top 5 bits of each of the three low
 * bytes. */
void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    int i, j;
    ppc_avr_t result;
#if defined(HOST_WORDS_BIGENDIAN)
    const ppc_avr_t *x[2] = { a, b };
#else
    const ppc_avr_t *x[2] = { b, a };
#endif

    VECTOR_FOR_INORDER_I(i, u64) {
        VECTOR_FOR_INORDER_I(j, u32) {
            uint32_t e = x[i]->u32[j];

            result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
                                 ((e >> 6) & 0x3e0) |
                                 ((e >> 3) & 0x1f));
        }
    }
    *r = result;
}
1060
/* Vector pack: narrow the elements of a:b into one vector, either with
 * saturation (via the cvt helpers, setting VSCR[SAT]) or modulo (the
 * identity macro I, dosat == 0). */
#define VPK(suffix, from, to, cvt, dosat)                               \
    void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r,             \
                            ppc_avr_t *a, ppc_avr_t *b)                 \
    {                                                                   \
        int i;                                                          \
        int sat = 0;                                                    \
        ppc_avr_t result;                                               \
        ppc_avr_t *a0 = PKBIG ? a : b;                                  \
        ppc_avr_t *a1 = PKBIG ? b : a;                                  \
                                                                        \
        VECTOR_FOR_INORDER_I(i, from) {                                 \
            result.to[i] = cvt(a0->from[i], &sat);                      \
            result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);  \
        }                                                               \
        *r = result;                                                    \
        if (dosat && sat) {                                             \
            env->vscr |= (1 << VSCR_SAT);                               \
        }                                                               \
    }
#define I(x, y) (x)
VPK(shss, s16, s8, cvtshsb, 1)
VPK(shus, s16, u8, cvtshub, 1)
VPK(swss, s32, s16, cvtswsh, 1)
VPK(swus, s32, u16, cvtswuh, 1)
VPK(uhus, u16, u8, cvtuhub, 1)
VPK(uwus, u32, u16, cvtuwuh, 1)
VPK(uhum, u16, u8, I, 0)
VPK(uwum, u32, u16, I, 0)
#undef I
#undef VPK
#undef PKBIG
1092
/* vrefp: per-element reciprocal estimate, implemented as an exact
 * 1.0 / b divide. */
void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
    }
}
1101
/* vrfin/vrfim/vrfip/vrfiz: round each element to an integral float value
 * using a local float_status with the requested rounding mode. */
#define VRFI(suffix, rounding)                                  \
    void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r,    \
                             ppc_avr_t *b)                      \
    {                                                           \
        int i;                                                  \
        float_status s = env->vec_status;                       \
                                                                \
        set_float_rounding_mode(rounding, &s);                  \
        for (i = 0; i < ARRAY_SIZE(r->f); i++) {                \
            r->f[i] = float32_round_to_int (b->f[i], &s);       \
        }                                                       \
    }
VRFI(n, float_round_nearest_even)
VRFI(m, float_round_down)
VRFI(p, float_round_up)
VRFI(z, float_round_to_zero)
#undef VRFI
1119
1120 #define VROTATE(suffix, element) \
1121 void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
1122 { \
1123 int i; \
1124 \
1125 for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
1126 unsigned int mask = ((1 << \
1127 (3 + (sizeof(a->element[0]) >> 1))) \
1128 - 1); \
1129 unsigned int shift = b->element[i] & mask; \
1130 r->element[i] = (a->element[i] << shift) | \
1131 (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
1132 } \
1133 }
1134 VROTATE(b, u8)
1135 VROTATE(h, u16)
1136 VROTATE(w, u32)
1137 #undef VROTATE
1138
/* vrsqrtefp: per-element reciprocal square-root estimate, implemented
 * exactly as 1.0 / sqrt(b). */
void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        float32 t = float32_sqrt(b->f[i], &env->vec_status);

        r->f[i] = float32_div(float32_one, t, &env->vec_status);
    }
}
1149
1150 void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
1151 ppc_avr_t *c)
1152 {
1153 r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
1154 r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
1155 }
1156
/* vexptefp: per-element 2^x estimate via the softfloat exp2. */
void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_exp2(b->f[i], &env->vec_status);
    }
}
1165
/* vlogefp: per-element log2(x) estimate via the softfloat log2. */
void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(r->f); i++) {
        r->f[i] = float32_log2(b->f[i], &env->vec_status);
    }
}
1174
#if defined(HOST_WORDS_BIGENDIAN)
#define LEFT 0
#define RIGHT 1
#else
#define LEFT 1
#define RIGHT 0
#endif
/* The specification says that the results are undefined if all of the
 * shift counts are not identical.  We check to make sure that they are
 * to conform to what real hardware appears to do. */
/* vsl/vsr: whole-vector shift by 0..7 bits, carrying bits between the
 * two u64 halves (shift == 0 is special-cased, so the 64 - shift
 * expressions never shift by the full width). */
#define VSHIFT(suffix, leftp)                                           \
    void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)    \
    {                                                                   \
        int shift = b->u8[LO_IDX*15] & 0x7;                             \
        int doit = 1;                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->u8); i++) {                       \
            doit = doit && ((b->u8[i] & 0x7) == shift);                 \
        }                                                               \
        if (doit) {                                                     \
            if (shift == 0) {                                           \
                *r = *a;                                                \
            } else if (leftp) {                                         \
                uint64_t carry = a->u64[LO_IDX] >> (64 - shift);        \
                                                                        \
                r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry;     \
                r->u64[LO_IDX] = a->u64[LO_IDX] << shift;               \
            } else {                                                    \
                uint64_t carry = a->u64[HI_IDX] << (64 - shift);        \
                                                                        \
                r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry;     \
                r->u64[HI_IDX] = a->u64[HI_IDX] >> shift;               \
            }                                                           \
        }                                                               \
    }
VSHIFT(l, LEFT)
VSHIFT(r, RIGHT)
#undef VSHIFT
#undef LEFT
#undef RIGHT
1216
/* Element-wise shift left: the shift count is masked to the element
 * width, so the left shift is always well defined. */
#define VSL(suffix, element)                                            \
    void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] << shift;                     \
        }                                                               \
    }
VSL(b, u8)
VSL(h, u16)
VSL(w, u32)
#undef VSL
1235
1236 void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
1237 {
1238 int sh = shift & 0xf;
1239 int i;
1240 ppc_avr_t result;
1241
1242 #if defined(HOST_WORDS_BIGENDIAN)
1243 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1244 int index = sh + i;
1245 if (index > 0xf) {
1246 result.u8[i] = b->u8[index - 0x10];
1247 } else {
1248 result.u8[i] = a->u8[index];
1249 }
1250 }
1251 #else
1252 for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
1253 int index = (16 - sh) + i;
1254 if (index > 0xf) {
1255 result.u8[i] = a->u8[index - 0x10];
1256 } else {
1257 result.u8[i] = b->u8[index];
1258 }
1259 }
1260 #endif
1261 *r = result;
1262 }
1263
1264 void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1265 {
1266 int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
1267
1268 #if defined(HOST_WORDS_BIGENDIAN)
1269 memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1270 memset(&r->u8[16-sh], 0, sh);
1271 #else
1272 memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1273 memset(&r->u8[0], 0, sh);
1274 #endif
1275 }
1276
/* Experimental testing shows that hardware masks the immediate. */
#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
#if defined(HOST_WORDS_BIGENDIAN)
#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
#else
/* Element arrays are stored reversed on little-endian hosts, so map the
 * architected (big-endian) element number to a host array index.  */
#define SPLAT_ELEMENT(element) \
    (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
#endif
/* Vector splat (vspltb/vsplth/vspltw): replicate the element of b
 * selected by the masked immediate into every element of r.  */
#define VSPLT(suffix, element)                                          \
    void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
    {                                                                   \
        uint32_t s = b->element[SPLAT_ELEMENT(element)];                \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            r->element[i] = s;                                          \
        }                                                               \
    }
VSPLT(b, u8)
VSPLT(h, u16)
VSPLT(w, u32)
#undef VSPLT
#undef SPLAT_ELEMENT
#undef _SPLAT_MASKED
1301
/* Vector splat immediate signed (vspltisb/vspltish/vspltisw): sign-
 * extend the 5-bit immediate and replicate it into every element.  */
#define VSPLTI(suffix, element, splat_type)                     \
    void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat)   \
    {                                                           \
        /* Sign-extend bit 4 of the immediate via an int8_t     \
         * right shift (implementation-defined in ISO C, but    \
         * arithmetic on the compilers QEMU supports).  */      \
        splat_type x = (int8_t)(splat << 3) >> 3;               \
        int i;                                                  \
                                                                \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {          \
            r->element[i] = x;                                  \
        }                                                       \
    }
VSPLTI(b, s8, int8_t)
VSPLTI(h, s16, int16_t)
VSPLTI(w, s32, int32_t)
#undef VSPLTI
1316
/* Per-element vector shift right (vsrab/vsrah/vsraw, vsrb/vsrh/vsrw):
 * each element of a is shifted right by the count in the matching
 * element of b, modulo the element width.  The signed (s8/s16/s32)
 * instantiations give the algebraic shift: >> of a negative value is
 * implementation-defined in ISO C but arithmetic on the compilers QEMU
 * supports.  */
#define VSR(suffix, element)                                            \
    void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)   \
    {                                                                   \
        int i;                                                          \
                                                                        \
        for (i = 0; i < ARRAY_SIZE(r->element); i++) {                  \
            /* mask = element width in bits minus 1 (7, 15 or 31) */    \
            unsigned int mask = ((1 <<                                  \
                                  (3 + (sizeof(a->element[0]) >> 1)))   \
                                 - 1);                                  \
            unsigned int shift = b->element[i] & mask;                  \
                                                                        \
            r->element[i] = a->element[i] >> shift;                     \
        }                                                               \
    }
VSR(ab, s8)
VSR(ah, s16)
VSR(aw, s32)
VSR(b, u8)
VSR(h, u16)
VSR(w, u32)
#undef VSR
1338
1339 void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1340 {
1341 int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;
1342
1343 #if defined(HOST_WORDS_BIGENDIAN)
1344 memmove(&r->u8[sh], &a->u8[0], 16 - sh);
1345 memset(&r->u8[0], 0, sh);
1346 #else
1347 memmove(&r->u8[0], &a->u8[sh], 16 - sh);
1348 memset(&r->u8[16 - sh], 0, sh);
1349 #endif
1350 }
1351
1352 void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1353 {
1354 int i;
1355
1356 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1357 r->u32[i] = a->u32[i] >= b->u32[i];
1358 }
1359 }
1360
1361 void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1362 {
1363 int64_t t;
1364 int i, upper;
1365 ppc_avr_t result;
1366 int sat = 0;
1367
1368 #if defined(HOST_WORDS_BIGENDIAN)
1369 upper = ARRAY_SIZE(r->s32)-1;
1370 #else
1371 upper = 0;
1372 #endif
1373 t = (int64_t)b->s32[upper];
1374 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1375 t += a->s32[i];
1376 result.s32[i] = 0;
1377 }
1378 result.s32[upper] = cvtsdsw(t, &sat);
1379 *r = result;
1380
1381 if (sat) {
1382 env->vscr |= (1 << VSCR_SAT);
1383 }
1384 }
1385
void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
{
    /* Vector sum across partial (1/2) signed word saturate: for each
     * doubleword, sum its two signed words of a plus the selected word
     * of b, store the saturated result in the low word of that
     * doubleword of r and zero in the high word.  */
    int i, j, upper;
    ppc_avr_t result;
    int sat = 0;    /* set by cvtsdsw() when a sum saturates */

#if defined(HOST_WORDS_BIGENDIAN)
    upper = 1;      /* index of the low word within each doubleword */
#else
    upper = 0;
#endif
    /* ARRAY_SIZE(r->u64) == 2, which is also the number of words per
     * doubleword, so it doubles as the inner loop bound.  */
    for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
        int64_t t = (int64_t)b->s32[upper + i * 2];

        result.u64[i] = 0;
        for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
            t += a->s32[2 * i + j];
        }
        result.s32[upper + i * 2] = cvtsdsw(t, &sat);
    }

    *r = result;
    if (sat) {
        env->vscr |= (1 << VSCR_SAT);
    }
}
1412
1413 void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1414 {
1415 int i, j;
1416 int sat = 0;
1417
1418 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1419 int64_t t = (int64_t)b->s32[i];
1420
1421 for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
1422 t += a->s8[4 * i + j];
1423 }
1424 r->s32[i] = cvtsdsw(t, &sat);
1425 }
1426
1427 if (sat) {
1428 env->vscr |= (1 << VSCR_SAT);
1429 }
1430 }
1431
1432 void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1433 {
1434 int sat = 0;
1435 int i;
1436
1437 for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
1438 int64_t t = (int64_t)b->s32[i];
1439
1440 t += a->s16[2 * i] + a->s16[2 * i + 1];
1441 r->s32[i] = cvtsdsw(t, &sat);
1442 }
1443
1444 if (sat) {
1445 env->vscr |= (1 << VSCR_SAT);
1446 }
1447 }
1448
1449 void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
1450 {
1451 int i, j;
1452 int sat = 0;
1453
1454 for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
1455 uint64_t t = (uint64_t)b->u32[i];
1456
1457 for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
1458 t += a->u8[4 * i + j];
1459 }
1460 r->u32[i] = cvtuduw(t, &sat);
1461 }
1462
1463 if (sat) {
1464 env->vscr |= (1 << VSCR_SAT);
1465 }
1466 }
1467
1468 #if defined(HOST_WORDS_BIGENDIAN)
1469 #define UPKHI 1
1470 #define UPKLO 0
1471 #else
1472 #define UPKHI 0
1473 #define UPKLO 1
1474 #endif
1475 #define VUPKPX(suffix, hi) \
1476 void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
1477 { \
1478 int i; \
1479 ppc_avr_t result; \
1480 \
1481 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
1482 uint16_t e = b->u16[hi ? i : i+4]; \
1483 uint8_t a = (e >> 15) ? 0xff : 0; \
1484 uint8_t r = (e >> 10) & 0x1f; \
1485 uint8_t g = (e >> 5) & 0x1f; \
1486 uint8_t b = e & 0x1f; \
1487 \
1488 result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
1489 } \
1490 *r = result; \
1491 }
1492 VUPKPX(lpx, UPKLO)
1493 VUPKPX(hpx, UPKHI)
1494 #undef VUPKPX
1495
/* Vector unpack high/low signed (vupkhsb/vupkhsh/vupklsb/vupklsh):
 * sign-extend the packed elements of the architecturally high
 * (hi != 0) or low half of b into the full-width elements of r.  The
 * widening happens via the implicit conversion in the assignment.  A
 * temporary is used because r may alias b.  */
#define VUPK(suffix, unpacked, packee, hi)                              \
    void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b)                \
    {                                                                   \
        int i;                                                          \
        ppc_avr_t result;                                               \
                                                                        \
        if (hi) {                                                       \
            for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) {             \
                result.unpacked[i] = b->packee[i];                      \
            }                                                           \
        } else {                                                        \
            for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
                 i++) {                                                 \
                result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
            }                                                           \
        }                                                               \
        *r = result;                                                    \
    }
VUPK(hsb, s16, s8, UPKHI)
VUPK(hsh, s32, s16, UPKHI)
VUPK(lsb, s16, s8, UPKLO)
VUPK(lsh, s32, s16, UPKLO)
#undef VUPK
#undef UPKHI
#undef UPKLO
1521
1522 #undef VECTOR_FOR_INORDER_I
1523 #undef HI_IDX
1524 #undef LO_IDX
1525
1526 /*****************************************************************************/
1527 /* SPE extension helpers */
1528 /* Use a table to make this quicker */
/* Table of bit-reversed 4-bit values: hbrev[n] is n with its four bits
 * mirrored.  */
static const uint8_t hbrev[16] = {
    0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
    0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
};

/* Mirror the eight bits of val (bit 0 <-> bit 7, and so on). */
static inline uint8_t byte_reverse(uint8_t val)
{
    uint8_t lo = val & 0xF;
    uint8_t hi = val >> 4;

    return (uint8_t)((hbrev[lo] << 4) | hbrev[hi]);
}

/* Mirror all 32 bits of val (bit 0 <-> bit 31, and so on). */
static inline uint32_t word_reverse(uint32_t val)
{
    uint32_t out = 0;
    int i;

    /* Reversing each byte and swapping byte order reverses the word. */
    for (i = 0; i < 4; i++) {
        out |= (uint32_t)byte_reverse((uint8_t)(val >> (24 - 8 * i)))
            << (8 * i);
    }
    return out;
}
1544
#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
{
    /* SPE bit-reversed increment: treat the masked low bits of arg1 as a
     * bit-reversed counter and add 1 to it in bit-reversed order, keeping
     * only the counter bits selected by arg2.  Classic FFT address-
     * generation trick: reverse, increment, reverse back.  */
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    /* a | ~b forces the non-counter bits to one so the +1 carry ripples
     * straight through them in the reversed domain.  */
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}
1556
1557 uint32_t helper_cntlsw32(uint32_t val)
1558 {
1559 if (val & 0x80000000) {
1560 return clz32(~val);
1561 } else {
1562 return clz32(val);
1563 }
1564 }
1565
uint32_t helper_cntlzw32(uint32_t val)
{
    /* Count leading zero bits of a 32-bit value. */
    return clz32(val);
}
1570
/* 440 specific */
/* Determine Leftmost Zero Byte: scan the eight bytes formed by high:low
 * (most-significant byte first) for the first zero byte.  Returns the
 * 1-based count of bytes scanned, mirrors it into the low 7 bits of
 * XER, and when update_Rc is set writes CR0 (0x4: zero byte in high,
 * 0x8: zero byte in low, 0x2: none) OR-ed with XER[SO].
 * NOTE(review): when no zero byte exists, i reaches 9 rather than 8 --
 * confirm against the PPC 440 dlmzb specification.  */
target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
                          target_ulong low, uint32_t update_Rc)
{
    target_ulong mask;
    int i;

    i = 1;
    /* Scan the four bytes of 'high', most significant first. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((high & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x4;
            }
            goto done;
        }
        i++;
    }
    /* Then the four bytes of 'low'. */
    for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
        if ((low & mask) == 0) {
            if (update_Rc) {
                env->crf[0] = 0x8;
            }
            goto done;
        }
        i++;
    }
    /* No zero byte found. */
    if (update_Rc) {
        env->crf[0] = 0x2;
    }
done:
    /* Byte count goes into the low 7 bits of XER. */
    env->xer = (env->xer & ~0x7F) | i;
    if (update_Rc) {
        env->crf[0] |= xer_so;
    }
    return i;
}