    return float64_mul(a, b, fpst);
}
-uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
-                          uint32_t rn, uint32_t numregs)
-{
-    /* Helper function for SIMD TBL and TBX. We have to do the table
-     * lookup part for the 64 bits worth of indices we're passed in.
-     * result is the initial results vector (either zeroes for TBL
-     * or some guest values for TBX), rn the register number where
-     * the table starts, and numregs the number of registers in the table.
-     * We return the results of the lookups.
-     */
-    int shift;
-
-    for (shift = 0; shift < 64; shift += 8) {
-        int index = extract64(indices, shift, 8);
-        if (index < 16 * numregs) {
-            /* Convert index (a byte offset into the virtual table
-             * which is a series of 128-bit vectors concatenated)
-             * into the correct register element plus a bit offset
-             * into that element, bearing in mind that the table
-             * can wrap around from V31 to V0.
-             */
-            int elt = (rn * 2 + (index >> 3)) % 64;
-            int bitidx = (index & 7) * 8;
-            uint64_t *q = aa64_vfp_qreg(env, elt >> 1);
-            uint64_t val = extract64(q[elt & 1], bitidx, 8);
-
-            result = deposit64(result, shift, 8, val);
-        }
-    }
-    return result;
-}
-
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
        float16 nan = a;
        if (float16_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
-            nan = float16_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float16_silence_nan(a, fpst);
+            }
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
-            nan = float32_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float32_silence_nan(a, fpst);
+            }
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        float64 nan = a;
        if (float64_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
-            nan = float64_silence_nan(a, fpst);
+            if (!fpst->default_nan_mode) {
+                nan = float64_silence_nan(a, fpst);
+            }
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);
-    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+    oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
    success = int128_eq(oldv, cmpv);
    return !success;
     */
    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
    newv = int128_make128(new_hi, new_lo);
-    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+    oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    success = int128_eq(oldv, cmpv);
    return !success;
    cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
    newv = int128_make128(new_lo, new_hi);
-    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+    oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
    env->xregs[rs] = int128_getlo(oldv);
    env->xregs[rs + 1] = int128_gethi(oldv);
    cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
    newv = int128_make128(new_lo, new_hi);
-    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+    oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
    env->xregs[rs + 1] = int128_getlo(oldv);
    env->xregs[rs] = int128_gethi(oldv);
         * the hflags rebuild, since we can pull the composite TBII field
         * from there.
         */
-        tbii = FIELD_EX32(env->hflags, TBFLAG_A64, TBII);
+        tbii = EX_TBFLAG_A64(env->hflags, TBII);
        if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
            /* TBI is enabled. */
            int core_mmu_idx = cpu_mmu_index(env, false);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
+    helper_rebuild_hflags_a64(env, cur_el);
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}