typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
+/*
+ * Return true if the specified S reg is in a scalar bank
+ * (ie if it is s0..s7): bits [4:3] of the reg number are zero.
+ * Short-vector operations treat a destination in the scalar bank
+ * as a plain scalar op (the callers set veclen = 0 in that case).
+ */
+static inline bool vfp_sreg_is_scalar(int reg)
+{
+ return (reg & 0x18) == 0;
+}
+
+/*
+ * Return true if the specified D reg is in a scalar bank
+ * (ie if it is d0..d3 or d16..d19): bits [3:2] of the reg number
+ * are zero. Bit 4 (which selects the d16..d31 half of the register
+ * file) is deliberately not masked, so each half has a scalar bank.
+ */
+static inline bool vfp_dreg_is_scalar(int reg)
+{
+ return (reg & 0xc) == 0;
+}
+
+/*
+ * Advance the S reg number forwards by delta within its bank
+ * (ie increment the low 3 bits but leave the rest the same,
+ * so the reg number wraps around inside its 8-register bank)
+ */
+static inline int vfp_advance_sreg(int reg, int delta)
+{
+ return ((reg + delta) & 0x7) | (reg & ~0x7);
+}
+
+/*
+ * Advance the D reg number forwards by delta within its bank
+ * (ie increment the low 2 bits but leave the rest the same,
+ * so the reg number wraps around inside its 4-register bank)
+ */
+static inline int vfp_advance_dreg(int reg, int delta)
+{
+ return ((reg + delta) & 0x3) | (reg & ~0x3);
+}
+
/*
* Perform a 3-operand VFP data processing instruction. fn is the
* callback to do the actual operation; this function deals with the
{
uint32_t delta_m = 0;
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i32 f0, f1, fd;
TCGv_ptr fpst;
}
if (veclen > 0) {
- bank_mask = 0x18;
-
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_sreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
delta_d = s->vec_stride + 1;
- if ((vm & bank_mask) == 0) {
+ if (vfp_sreg_is_scalar(vm)) {
/* mixed scalar/vector */
delta_m = 0;
} else {
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
- vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+ vd = vfp_advance_sreg(vd, delta_d);
+ vn = vfp_advance_sreg(vn, delta_d);
neon_load_reg32(f0, vn);
if (delta_m) {
- vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ vm = vfp_advance_sreg(vm, delta_m);
neon_load_reg32(f1, vm);
}
}
{
uint32_t delta_m = 0;
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i64 f0, f1, fd;
TCGv_ptr fpst;
}
if (veclen > 0) {
- bank_mask = 0xc;
-
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_dreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
delta_d = (s->vec_stride >> 1) + 1;
- if ((vm & bank_mask) == 0) {
+ if (vfp_dreg_is_scalar(vm)) {
/* mixed scalar/vector */
delta_m = 0;
} else {
}
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
- vn = ((vn + delta_d) & (bank_mask - 1)) | (vn & bank_mask);
+ vd = vfp_advance_dreg(vd, delta_d);
+ vn = vfp_advance_dreg(vn, delta_d);
neon_load_reg64(f0, vn);
if (delta_m) {
- vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ vm = vfp_advance_dreg(vm, delta_m);
neon_load_reg64(f1, vm);
}
}
{
uint32_t delta_m = 0;
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i32 f0, fd;
}
if (veclen > 0) {
- bank_mask = 0x18;
-
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_sreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
delta_d = s->vec_stride + 1;
- if ((vm & bank_mask) == 0) {
+ if (vfp_sreg_is_scalar(vm)) {
/* mixed scalar/vector */
delta_m = 0;
} else {
if (delta_m == 0) {
/* single source one-many */
while (veclen--) {
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vd = vfp_advance_sreg(vd, delta_d);
neon_store_reg32(fd, vd);
}
break;
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
- vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ vd = vfp_advance_sreg(vd, delta_d);
+ vm = vfp_advance_sreg(vm, delta_m);
neon_load_reg32(f0, vm);
}
{
uint32_t delta_m = 0;
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i64 f0, fd;
}
if (veclen > 0) {
- bank_mask = 0xc;
-
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_dreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
delta_d = (s->vec_stride >> 1) + 1;
- if ((vm & bank_mask) == 0) {
+ if (vfp_dreg_is_scalar(vm)) {
/* mixed scalar/vector */
delta_m = 0;
} else {
if (delta_m == 0) {
/* single source one-many */
while (veclen--) {
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vd = vfp_advance_dreg(vd, delta_d);
neon_store_reg64(fd, vd);
}
break;
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
- vm = ((vm + delta_m) & (bank_mask - 1)) | (vm & bank_mask);
+ vd = vfp_advance_dreg(vd, delta_d);
+ vm = vfp_advance_dreg(vm, delta_m);
neon_load_reg64(f0, vm);
}
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i32 fd;
uint32_t n, i, vd;
}
if (veclen > 0) {
- bank_mask = 0x18;
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_sreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vd = vfp_advance_sreg(vd, delta_d);
}
tcg_temp_free_i32(fd);
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
uint32_t delta_d = 0;
- uint32_t bank_mask = 0;
int veclen = s->vec_len;
TCGv_i64 fd;
uint32_t n, i, vd;
}
if (veclen > 0) {
- bank_mask = 0xc;
/* Figure out what type of vector operation this is. */
- if ((vd & bank_mask) == 0) {
+ if (vfp_dreg_is_scalar(vd)) {
/* scalar */
veclen = 0;
} else {
/* Set up the operands for the next iteration */
veclen--;
- vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask);
+ vd = vfp_advance_dreg(vd, delta_d);
}
tcg_temp_free_i64(fd);