git.proxmox.com Git - qemu.git/commitdiff
Merge branch 'linux-user-for-upstream' of git://gitorious.org/qemu-maemo/qemu
author Aurelien Jarno <aurelien@aurel32.net>
Wed, 9 Feb 2011 18:53:36 +0000 (19:53 +0100)
committer Aurelien Jarno <aurelien@aurel32.net>
Wed, 9 Feb 2011 18:53:36 +0000 (19:53 +0100)
* 'linux-user-for-upstream' of git://gitorious.org/qemu-maemo/qemu:
  linux-user: fix for loopmount ioctl
  linux-user: fix build errors for mmap2-only ports
  user: speed up init_paths a bit
  linux-user: implement sched_{g,s}etaffinity
  linux-user/FLAT: allow targets to override FLAT processing
  linux-user/FLAT: fix auto-stack sizing
  linux-user: decode MAP_{UNINITIALIZED,EXECUTABLE} in strace
  linux-user: add ppoll syscall support
  linux-user/elfload: add FDPIC support
  linux-user: fix sizeof handling for getsockopt
  linux-user: Fix possible realloc memory leak
  linux-user: Add support for -version option

target-arm/helpers.h
target-arm/neon_helper.c
target-arm/translate.c
target-mips/op_helper.c

diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index 8a2564ec6fa5e9e5511349a00bcd3703b972fc8f..4d0de00eab3458777473c578ae88e96edf3027ba 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -299,10 +299,13 @@ DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
 
 DEF_HELPER_1(neon_narrow_u8, i32, i64)
 DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
 DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
 DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
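Note: per QEMU's def-helper.h type mapping (i32 -> uint32_t, env -> CPUState *,
i64 -> uint64_t), the three DEF_HELPER_2 entries added above roughly expand to
prototypes like the sketch below; the corresponding definitions are added to
target-arm/neon_helper.c in the next diff.

    uint32_t helper_neon_unarrow_sat8(CPUState *env, uint64_t x);
    uint32_t helper_neon_unarrow_sat16(CPUState *env, uint64_t x);
    uint32_t helper_neon_unarrow_sat32(CPUState *env, uint64_t x);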
diff --git a/target-arm/neon_helper.c b/target-arm/neon_helper.c
index 268af33a6cb52d7a663a74d65c7d668ff49d4e1a..61890dd69a8feef67e21d2562a8dbf0c28453575 100644
--- a/target-arm/neon_helper.c
+++ b/target-arm/neon_helper.c
@@ -1053,6 +1053,33 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
     return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
 }
 
+uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
+{
+    uint16_t s;
+    uint8_t d;
+    uint32_t res = 0;
+#define SAT8(n) \
+    s = x >> n; \
+    if (s & 0x8000) { \
+        SET_QC(); \
+    } else { \
+        if (s > 0xff) { \
+            d = 0xff; \
+            SET_QC(); \
+        } else  { \
+            d = s; \
+        } \
+        res |= (uint32_t)d << (n / 2); \
+    }
+
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
 uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
 {
     uint16_t s;
@@ -1099,6 +1126,29 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
     return res;
 }
 
+uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
+{
+    uint32_t high;
+    uint32_t low;
+    low = x;
+    if (low & 0x80000000) {
+        low = 0;
+        SET_QC();
+    } else if (low > 0xffff) {
+        low = 0xffff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high & 0x80000000) {
+        high = 0;
+        SET_QC();
+    } else if (high > 0xffff) {
+        high = 0xffff;
+        SET_QC();
+    }
+    return low | (high << 16);
+}
+
 uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
 {
     uint32_t high;
@@ -1133,6 +1183,19 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
     return (uint16_t)low | (high << 16);
 }
 
+uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
+{
+    if (x & 0x8000000000000000ull) {
+        SET_QC();
+        return 0;
+    }
+    if (x > 0xffffffffu) {
+        SET_QC();
+        return 0xffffffffu;
+    }
+    return x;
+}
+
 uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
 {
     if (x > 0xffffffffu) {
@@ -1146,7 +1209,7 @@ uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
 {
     if ((int64_t)x != (int32_t)x) {
         SET_QC();
-        return (x >> 63) ^ 0x7fffffff;
+        return ((int64_t)x >> 63) ^ 0x7fffffff;
     }
     return x;
 }
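Note: the new neon_unarrow_sat{8,16,32} helpers implement the signed-to-unsigned
saturating narrow used by VQMOVUN: a negative source lane clamps to 0, a lane
larger than the unsigned maximum clamps to that maximum, and either case sets
the QC (cumulative saturation) flag. A minimal standalone sketch of the 32->16
case, with a plain bool standing in for SET_QC()/env (unarrow_sat16 and the
test values are illustrative, not QEMU code):

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Narrow two signed 32-bit lanes to two unsigned 16-bit lanes, saturating. */
    static uint32_t unarrow_sat16(uint64_t x, bool *qc)
    {
        uint32_t out = 0;
        for (int lane = 0; lane < 2; lane++) {
            int32_t s = (int32_t)(x >> (32 * lane));
            uint32_t d;
            if (s < 0) {                 /* negative: clamp to 0, flag saturation */
                d = 0;
                *qc = true;
            } else if (s > 0xffff) {     /* too large: clamp to 0xffff, flag saturation */
                d = 0xffff;
                *qc = true;
            } else {
                d = (uint32_t)s;
            }
            out |= d << (16 * lane);
        }
        return out;
    }

    int main(void)
    {
        bool qc = false;
        /* low lane -1 clamps to 0x0000, high lane 0x12345 clamps to 0xffff */
        uint64_t x = ((uint64_t)0x00012345u << 32) | 0xffffffffu;
        printf("result=%08" PRIx32 " qc=%d\n", unarrow_sat16(x, &qc), qc);  /* ffff0000, 1 */
        return 0;
    }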
diff --git a/target-arm/translate.c b/target-arm/translate.c
index e4649e6d542bda05b797d38e8949d1ff31b4cd14..3087a5dcdd999dc87fad7a4a1595b9b755ee03cb 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -4065,6 +4065,16 @@ static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
     }
 }
 
+static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
                                          int q, int u)
 {
@@ -4688,7 +4698,19 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
                         } else if (op == 4 || (op == 5 && u)) {
                             /* Insert */
-                            cpu_abort(env, "VS[LR]I.64 not implemented");
+                            neon_load_reg64(cpu_V1, rd + pass);
+                            uint64_t mask;
+                            if (shift < -63 || shift > 63) {
+                                mask = 0;
+                            } else {
+                                if (op == 4) {
+                                    mask = 0xffffffffffffffffull >> -shift;
+                                } else {
+                                    mask = 0xffffffffffffffffull << shift;
+                                }
+                            }
+                            tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
+                            tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
                         }
                         neon_store_reg64(cpu_V0, rd + pass);
                     } else { /* size < 3 */
@@ -4870,16 +4892,28 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         /* The shift is less than the width of the source
                            type, so we can just shift the whole register.  */
                         tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
+                        /* Widen the result of the shift: we need to clear
+                         * the potential overflow bits resulting from the
+                         * left bits of each narrow input appearing as the
+                         * right bits of its left-hand neighbour narrow
+                         * input.  */
                         if (size < 2 || !u) {
                             uint64_t imm64;
                             if (size == 0) {
                                 imm = (0xffu >> (8 - shift));
                                 imm |= imm << 16;
-                            } else {
+                            } else if (size == 1) {
                                 imm = 0xffff >> (16 - shift);
+                            } else {
+                                /* size == 2 */
+                                imm = 0xffffffff >> (32 - shift);
+                            }
+                            if (size < 2) {
+                                imm64 = imm | (((uint64_t)imm) << 32);
+                            } else {
+                                imm64 = imm;
                             }
-                            imm64 = imm | (((uint64_t)imm) << 32);
-                            tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
+                            tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
                         }
                     }
                     neon_store_reg64(cpu_V0, rd + pass);
@@ -5461,12 +5495,18 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     for (pass = 0; pass < 2; pass++) {
                         neon_load_reg64(cpu_V0, rm + pass);
                         tmp = new_tmp();
-                        if (op == 36 && q == 0) {
-                            gen_neon_narrow(size, tmp, cpu_V0);
-                        } else if (q) {
-                            gen_neon_narrow_satu(size, tmp, cpu_V0);
-                        } else {
-                            gen_neon_narrow_sats(size, tmp, cpu_V0);
+                        if (op == 36) {
+                            if (q) { /* VQMOVUN */
+                                gen_neon_unarrow_sats(size, tmp, cpu_V0);
+                            } else { /* VMOVN */
+                                gen_neon_narrow(size, tmp, cpu_V0);
+                            }
+                        } else { /* VQMOVN */
+                            if (q) {
+                                gen_neon_narrow_satu(size, tmp, cpu_V0);
+                            } else {
+                                gen_neon_narrow_sats(size, tmp, cpu_V0);
+                            }
                         }
                         if (pass == 0) {
                             tmp2 = tmp;
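Note: the VS[LR]I.64 hunk above replaces the cpu_abort() with a real
shift-and-insert: only the bit positions actually produced by the shift are
taken from the shifted source, and the old destination keeps the rest (with
|shift| > 63 handled by an all-zero mask). A hedged scalar sketch of the same
masking, assuming shift is already stored negated for right shifts as in the
decoder (shift_insert64 is illustrative, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* VSLI: 0 <= shift < 64; VSRI: -64 < shift < 0 (decoder negates right-shift amounts). */
    static uint64_t shift_insert64(uint64_t dest, uint64_t src, int shift)
    {
        uint64_t shifted, mask;
        if (shift >= 0) {                       /* VSLI */
            shifted = src << shift;
            mask = ~(uint64_t)0 << shift;       /* bits written by the left shift */
        } else {                                /* VSRI */
            shifted = src >> -shift;
            mask = ~(uint64_t)0 >> -shift;      /* bits written by the right shift */
        }
        return (dest & ~mask) | shifted;        /* keep untouched dest bits, insert the rest */
    }

    int main(void)
    {
        /* VSLI #8: the low 8 bits of dest survive, the rest comes from src << 8. */
        uint64_t r = shift_insert64(0x1111111111111111ull, 0xffull, 8);
        printf("%016llx\n", (unsigned long long)r);   /* 000000000000ff11 */
        return 0;
    }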
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 669faf17ae17be5d6418cbfb5db26a75a5db4b3d..bd16ce35430cdbe884369fc7ca9b8507ec6f545a 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2282,6 +2282,7 @@ uint64_t helper_float_roundl_d(uint64_t fdt0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2295,6 +2296,7 @@ uint64_t helper_float_roundl_s(uint32_t fst0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2308,6 +2310,7 @@ uint32_t helper_float_roundw_d(uint64_t fdt0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2321,6 +2324,7 @@ uint32_t helper_float_roundw_s(uint32_t fst0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2334,6 +2338,7 @@ uint64_t helper_float_truncl_d(uint64_t fdt0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
     update_fcr31();
     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
@@ -2345,6 +2350,7 @@ uint64_t helper_float_truncl_s(uint32_t fst0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
     update_fcr31();
     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
@@ -2356,6 +2362,7 @@ uint32_t helper_float_truncw_d(uint64_t fdt0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
     update_fcr31();
     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
@@ -2367,6 +2374,7 @@ uint32_t helper_float_truncw_s(uint32_t fst0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
     update_fcr31();
     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
@@ -2378,6 +2386,7 @@ uint64_t helper_float_ceill_d(uint64_t fdt0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2391,6 +2400,7 @@ uint64_t helper_float_ceill_s(uint32_t fst0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2404,6 +2414,7 @@ uint32_t helper_float_ceilw_d(uint64_t fdt0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2417,6 +2428,7 @@ uint32_t helper_float_ceilw_s(uint32_t fst0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2430,6 +2442,7 @@ uint64_t helper_float_floorl_d(uint64_t fdt0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2443,6 +2456,7 @@ uint64_t helper_float_floorl_s(uint32_t fst0)
 {
     uint64_t dt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2456,6 +2470,7 @@ uint32_t helper_float_floorw_d(uint64_t fdt0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
@@ -2469,6 +2484,7 @@ uint32_t helper_float_floorw_s(uint32_t fst0)
 {
     uint32_t wt2;
 
+    set_float_exception_flags(0, &env->active_fpu.fp_status);
     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
     RESTORE_ROUNDING_MODE;
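Note: every op_helper.c hunk above applies the same fix: clear the accumulated
softfloat exception flags before the float->int conversion, so the subsequent
FP_OVERFLOW/FP_INVALID check reacts only to that conversion rather than to
flags left over from an earlier operation. The same idea sketched against the
host FPU with standard <fenv.h> (an analogy only; convert_checked and the test
values are illustrative, not QEMU's softfloat API):

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    /* Convert, reporting whether *this* conversion raised invalid/overflow. */
    static long long convert_checked(double x, int *bad)
    {
        feclearexcept(FE_ALL_EXCEPT);               /* drop any stale flags first */
        long long r = llrint(x);
        *bad = fetestexcept(FE_INVALID | FE_OVERFLOW) != 0;
        return r;
    }

    int main(void)
    {
        int bad;
        (void)llrint(1e300);                        /* leaves a stale invalid flag behind */
        long long r = convert_checked(2.0, &bad);   /* must not be fooled by it */
        printf("r=%lld bad=%d\n", r, bad);          /* r=2 bad=0 */
        return 0;
    }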