#define GET_FIELD_SP(X, FROM, TO) \
GET_FIELD(X, 63 - (TO), 63 - (FROM))
-target_ulong helper_array8(CPUState *env, target_ulong pixel_addr,
- target_ulong cubesize)
+target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
{
return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
(GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
GET_FIELD_SP(pixel_addr, 11, 12);
}
-target_ulong helper_alignaddr(CPUState *env, target_ulong addr,
- target_ulong offset)
-{
- uint64_t tmp;
-
- tmp = addr + offset;
- env->gsr &= ~7ULL;
- env->gsr |= tmp & 7ULL;
- return tmp & ~7ULL;
-}
-
-uint64_t helper_faligndata(CPUState *env, uint64_t src1, uint64_t src2)
-{
- uint64_t tmp;
-
- tmp = src1 << ((env->gsr & 7) * 8);
- /* on many architectures a shift of 64 does nothing */
- if ((env->gsr & 7) != 0) {
- tmp |= src2 >> (64 - (env->gsr & 7) * 8);
- }
- return tmp;
-}
-
#ifdef HOST_WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
float32 f;
} VIS32;
-uint64_t helper_fpmerge(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fpmerge(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
return d.ll;
}
-uint64_t helper_fmul8x16(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmul8x16al(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmul8x16au(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmul8sux16(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmul8ulx16(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmuld8sux16(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fmuld8ulx16(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2)
{
VIS64 s, d;
uint32_t tmp;
return d.ll;
}
-uint64_t helper_fexpand(CPUState *env, uint64_t src1, uint64_t src2)
+uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
{
VIS32 s;
VIS64 d;
}
#define VIS_HELPER(name, F) \
- uint64_t name##16(CPUState *env, uint64_t src1, uint64_t src2) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
{ \
VIS64 s, d; \
\
return d.ll; \
} \
\
- uint32_t name##16s(CPUState *env, uint32_t src1, \
- uint32_t src2) \
+ uint32_t name##16s(uint32_t src1, uint32_t src2) \
{ \
VIS32 s, d; \
\
return d.l; \
} \
\
- uint64_t name##32(CPUState *env, uint64_t src1, uint64_t src2) \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
{ \
VIS64 s, d; \
\
return d.ll; \
} \
\
- uint32_t name##32s(CPUState *env, uint32_t src1, \
- uint32_t src2) \
+ uint32_t name##32s(uint32_t src1, uint32_t src2) \
{ \
VIS32 s, d; \
\
VIS_HELPER(helper_fpsub, FSUB)
#define VIS_CMPHELPER(name, F) \
- uint64_t name##16(CPUState *env, uint64_t src1, uint64_t src2) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
{ \
VIS64 s, d; \
\
return d.ll; \
} \
\
- uint64_t name##32(CPUState *env, uint64_t src1, uint64_t src2) \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
{ \
VIS64 s, d; \
\
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
+
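+/* PDIST: accumulate the sum of the absolute differences of the
+   eight byte pairs of src1 and src2 into sum.  */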
+uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2)
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ int s1, s2;
+
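+        /* Take byte i of each operand, most-significant byte first.  */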
+ s1 = (src1 >> (56 - (i * 8))) & 0xff;
+ s2 = (src2 >> (56 - (i * 8))) & 0xff;
+
+ /* Absolute value of difference. */
+ s1 -= s2;
+ if (s1 < 0) {
+ s1 = -s1;
+ }
+
+ sum += s1;
+ }
+
+ return sum;
+}
+
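+/* FPACK16: pack the four 16-bit fixed-point values of rs2 into four
+   unsigned bytes, scaled by GSR.scale and saturated to [0, 255].  */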
+uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0xf;
+ uint32_t ret = 0;
+ int byte;
+
+ for (byte = 0; byte < 4; byte++) {
+ uint32_t val;
+ int16_t src = rs2 >> (byte * 16);
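+        /* Apply the scale, then drop the 7 fractional bits.  */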
+ int32_t scaled = src << scale;
+ int32_t from_fixed = scaled >> 7;
+
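+        /* Saturate to the unsigned 8-bit range.  */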
+ val = (from_fixed < 0 ? 0 :
+ from_fixed > 255 ? 255 : from_fixed);
+
+ ret |= val << (8 * byte);
+ }
+
+ return ret;
+}
+
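+/* FPACK32: pack the two 32-bit fixed-point words of rs2 into two
+   unsigned bytes and merge them into rs1 shifted up one byte.  */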
+uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint64_t ret = 0;
+ int word;
+
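+    /* Each word of rs1 moves up one byte; the cleared low bytes
+       receive the packed results.  */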
+ ret = (rs1 << 8) & ~(0x000000ff000000ffULL);
+ for (word = 0; word < 2; word++) {
+ uint64_t val;
+ int32_t src = rs2 >> (word * 32);
+ int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 23;
+
+        val = (from_fixed < 0 ? 0 :
+               from_fixed > 255 ? 255 : from_fixed);
+
+ ret |= val << (32 * word);
+ }
+
+ return ret;
+}
+
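+/* FPACKFIX: pack the two 32-bit fixed-point words of rs2 into two
+   signed 16-bit values, scaled by GSR.scale and saturated.  */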
+uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint32_t ret = 0;
+ int word;
+
+ for (word = 0; word < 2; word++) {
+ uint32_t val;
+ int32_t src = rs2 >> (word * 32);
+        /* Widen before shifting: scale can reach 31, which would
+           overflow a 32-bit shift.  */
+        int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 16;
+
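+        /* Saturate to the signed 16-bit range.  */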
+ val = (from_fixed < -32768 ? -32768 :
+ from_fixed > 32767 ? 32767 : from_fixed);
+
+ ret |= (val & 0xffff) << (word * 16);
+ }
+
+ return ret;
+}
+
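+/* BSHUFFLE: select eight bytes out of the concatenation src1:src2,
+   one per 4-bit index in the GSR mask field.  */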
+uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
+{
+ union {
+ uint64_t ll[2];
+ uint8_t b[16];
+ } s;
+ VIS64 r;
+ uint32_t i, mask, host;
+
+ /* Set up S such that we can index across all of the bytes. */
+#ifdef HOST_WORDS_BIGENDIAN
+ s.ll[0] = src1;
+ s.ll[1] = src2;
+ host = 0;
+#else
+ s.ll[1] = src1;
+ s.ll[0] = src2;
+ host = 15;
+#endif
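+    /* The shuffle mask sits in the high 32 bits of the GSR.  */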
+ mask = gsr >> 32;
+
+ for (i = 0; i < 8; ++i) {
+ unsigned e = (mask >> (28 - i*4)) & 0xf;
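+        /* e ^ host converts the big-endian byte index to the host's
+           layout of s.b.  */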
+ r.VIS_B64(i) = s.b[e ^ host];
+ }
+
+ return r.ll;
+}