target-arm: convert disas_neon_data_insn and helpers not to use cpu_T
target-arm/translate.c
index 935cfe73efdb0cbb66dcb0fa963195bda2370feb..228d8569631e0720743174128426df8b11b1d6aa 100644
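The conversion pattern applied throughout this patch: helpers that used to operate implicitly on the fixed globals cpu_T[0]/cpu_T[1] now take their operands as explicit TCGv temporaries, which callers obtain with new_tmp() or neon_load_reg() and release with dead_tmp(). A minimal before/after sketch of that pattern, lifted from the gen_adc change further down in this diff:

/* Old style: operands implicit in the global cpu_T[] array.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();                      /* cpu_T[0] += cpu_T[1] */
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp); /* add the carry flag */
    dead_tmp(tmp);
}

/* New style: operands passed explicitly as TCG temporaries.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);             /* add the carry flag */
    dead_tmp(tmp);
}

The same substitution is applied mechanically across disas_neon_data_insn: tmp/tmp2/tmp3 replace cpu_T[0]/cpu_T[1], and the CPU_T001/CPU_T0E01 convenience macros, the NEON_GET_REG/NEON_SET_REG macros and the gen_neon_movl_*_scratch helpers are dropped in favour of neon_load_reg/neon_store_reg and neon_load_scratch/neon_store_scratch.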
@@ -109,44 +109,20 @@ void arm_translate_init(void)
 #include "helpers.h"
 }
 
-/* The code generator doesn't like lots of temporaries, so maintain our own
-   cache for reuse within a function.  */
-#define MAX_TEMPS 8
 static int num_temps;
-static TCGv temps[MAX_TEMPS];
 
 /* Allocate a temporary variable.  */
 static TCGv_i32 new_tmp(void)
 {
-    TCGv tmp;
-    if (num_temps == MAX_TEMPS)
-        abort();
-
-    if (GET_TCGV_I32(temps[num_temps]))
-      return temps[num_temps++];
-
-    tmp = tcg_temp_new_i32();
-    temps[num_temps++] = tmp;
-    return tmp;
+    num_temps++;
+    return tcg_temp_new_i32();
 }
 
 /* Release a temporary variable.  */
 static void dead_tmp(TCGv tmp)
 {
-    int i;
+    tcg_temp_free(tmp);
     num_temps--;
-    i = num_temps;
-    if (TCGV_EQUAL(temps[i], tmp))
-        return;
-
-    /* Shuffle this temp to the last slot.  */
-    while (!TCGV_EQUAL(temps[i], tmp))
-        i--;
-    while (i < num_temps) {
-        temps[i] = temps[i + 1];
-        i++;
-    }
-    temps[i] = tmp;
 }
 
 static inline TCGv load_cpu_offset(int offset)
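With the reuse cache removed above, new_tmp()/dead_tmp() keep num_temps only as a count of live temporaries, so the translator can still detect leaks. An illustrative sketch of how such a counter can be used after each translated instruction (the exact check and message in translate.c may differ; check_temp_leak is a hypothetical name):

/* Sketch: flag any TCG temporaries still live once an instruction
   has been translated.  Illustrative only.  */
static void check_temp_leak(uint32_t pc)
{
    if (num_temps) {
        fprintf(stderr, "Internal resource leak before %08x\n", pc);
        num_temps = 0;
    }
}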
@@ -211,22 +187,10 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
 
 #define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
 #define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
-
-#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
 
 #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
 #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
-#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
-#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
 
 #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
 #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
@@ -240,7 +204,6 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
 
-#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
 
 #define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
 /* Set NZCV flags from the high 4 bits of var.  */
@@ -362,17 +325,17 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
 }
 
 /* Unsigned 32x32->64 multiply.  */
-static void gen_op_mull_T0_T1(void)
+static void gen_mull(TCGv a, TCGv b)
 {
     TCGv_i64 tmp1 = tcg_temp_new_i64();
     TCGv_i64 tmp2 = tcg_temp_new_i64();
 
-    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
-    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
+    tcg_gen_extu_i32_i64(tmp1, a);
+    tcg_gen_extu_i32_i64(tmp2, b);
     tcg_gen_mul_i64(tmp1, tmp1, tmp2);
-    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
+    tcg_gen_trunc_i64_i32(a, tmp1);
     tcg_gen_shri_i64(tmp1, tmp1, 32);
-    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
+    tcg_gen_trunc_i64_i32(b, tmp1);
 }
 
 /* Signed 32x32->64 multiply.  */
@@ -438,12 +401,12 @@ static inline void gen_logic_CC(TCGv var)
 }
 
 /* T0 += T1 + CF.  */
-static void gen_adc_T0_T1(void)
+static void gen_adc(TCGv t0, TCGv t1)
 {
     TCGv tmp;
-    gen_op_addl_T0_T1();
+    tcg_gen_add_i32(t0, t0, t1);
     tmp = load_cpu_field(CF);
-    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
+    tcg_gen_add_i32(t0, t0, tmp);
     dead_tmp(tmp);
 }
 
@@ -468,9 +431,6 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
     dead_tmp(tmp);
 }
 
-#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
-#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
-
 /* T0 &= ~T1.  Clobbers T1.  */
 /* FIXME: Implement bic natively.  */
 static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
@@ -888,11 +848,6 @@ static inline void gen_movl_T1_reg(DisasContext *s, int reg)
     load_reg_var(s, cpu_T[1], reg);
 }
 
-static inline void gen_movl_T2_reg(DisasContext *s, int reg)
-{
-    load_reg_var(s, cpu_T[2], reg);
-}
-
 static inline void gen_set_pc_im(uint32_t val)
 {
     tcg_gen_movi_i32(cpu_R[15], val);
@@ -927,8 +882,7 @@ static inline void gen_movl_reg_T1(DisasContext *s, int reg)
 /* Force a TB lookup after an instruction that changes the CPU state.  */
 static inline void gen_lookup_tb(DisasContext *s)
 {
-    gen_op_movl_T0_im(s->pc);
-    gen_movl_reg_T0(s, 15);
+    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
     s->is_jmp = DISAS_UPDATE;
 }
 
@@ -1158,14 +1112,6 @@ neon_reg_offset (int reg, int n)
     return vfp_reg_offset(0, sreg);
 }
 
-/* FIXME: Remove these.  */
-#define neon_T0 cpu_T[0]
-#define neon_T1 cpu_T[1]
-#define NEON_GET_REG(T, reg, n) \
-  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
-#define NEON_SET_REG(T, reg, n) \
-  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
-
 static TCGv neon_load_reg(int reg, int pass)
 {
     TCGv tmp = new_tmp();
@@ -1928,6 +1874,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
         if ((insn & 0x000ff00f) != 0x0003f000)
             return 1;
         gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
+        gen_op_movl_T0_T1();
         switch ((insn >> 22) & 3) {
         case 0:
             for (i = 0; i < 7; i ++) {
@@ -1974,6 +1921,7 @@ static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
         if ((insn & 0x000ff00f) != 0x0003f000)
             return 1;
         gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
+        gen_op_movl_T0_T1();
         switch ((insn >> 22) & 3) {
         case 0:
             for (i = 0; i < 7; i ++) {
@@ -3444,8 +3392,8 @@ static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
     return mask;
 }
 
-/* Returns nonzero if access to the PSR is not permitted.  */
-static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
+/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
+static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
 {
     TCGv tmp;
     if (spsr) {
@@ -3455,16 +3403,26 @@ static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
 
         tmp = load_cpu_field(spsr);
         tcg_gen_andi_i32(tmp, tmp, ~mask);
-        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
-        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
+        tcg_gen_andi_i32(t0, t0, mask);
+        tcg_gen_or_i32(tmp, tmp, t0);
         store_cpu_field(tmp, spsr);
     } else {
-        gen_set_cpsr(cpu_T[0], mask);
+        gen_set_cpsr(t0, mask);
     }
+    dead_tmp(t0);
     gen_lookup_tb(s);
     return 0;
 }
 
+/* Returns nonzero if access to the PSR is not permitted.  */
+static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
+{
+    TCGv tmp;
+    tmp = new_tmp();
+    tcg_gen_movi_i32(tmp, val);
+    return gen_set_psr(s, mask, spsr, tmp);
+}
+
 /* Generate an old-style exception return. Marks pc as dead. */
 static void gen_exception_return(DisasContext *s, TCGv pc)
 {
@@ -3511,31 +3469,25 @@ static void gen_nop_hint(DisasContext *s, int val)
     }
 }
 
-/* These macros help make the code more readable when migrating from the
-   old dyngen helpers.  They should probably be removed when
-   T0/T1 are removed.  */
-#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
-#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
-
 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
 
-static inline int gen_neon_add(int size)
+static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
 {
     switch (size) {
-    case 0: gen_helper_neon_add_u8(CPU_T001); break;
-    case 1: gen_helper_neon_add_u16(CPU_T001); break;
-    case 2: gen_op_addl_T0_T1(); break;
+    case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
+    case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
+    case 2: tcg_gen_add_i32(t0, t0, t1); break;
     default: return 1;
     }
     return 0;
 }
 
-static inline void gen_neon_rsb(int size)
+static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
 {
     switch (size) {
-    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
-    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
-    case 2: gen_op_rsbl_T0_T1(); break;
+    case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
+    case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
+    case 2: tcg_gen_sub_i32(t0, t1, t0); break;
     default: return;
     }
 }
@@ -3555,22 +3507,22 @@ static inline void gen_neon_rsb(int size)
 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
     switch ((size << 1) | u) { \
     case 0: \
-        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
         break; \
     case 1: \
-        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
         break; \
     case 2: \
-        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
         break; \
     case 3: \
-        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
         break; \
     case 4: \
-        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
         break; \
     case 5: \
-        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
         break; \
     default: return 1; \
     }} while (0)
@@ -3578,93 +3530,207 @@ static inline void gen_neon_rsb(int size)
 #define GEN_NEON_INTEGER_OP(name) do { \
     switch ((size << 1) | u) { \
     case 0: \
-        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
         break; \
     case 1: \
-        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
         break; \
     case 2: \
-        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
         break; \
     case 3: \
-        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
         break; \
     case 4: \
-        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
         break; \
     case 5: \
-        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
+        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
         break; \
     default: return 1; \
     }} while (0)
 
-static inline void
-gen_neon_movl_scratch_T0(int scratch)
+static TCGv neon_load_scratch(int scratch)
 {
-  uint32_t offset;
-
-  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
+    TCGv tmp = new_tmp();
+    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
+    return tmp;
 }
 
-static inline void
-gen_neon_movl_scratch_T1(int scratch)
+static void neon_store_scratch(int scratch, TCGv var)
 {
-  uint32_t offset;
+    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
+    dead_tmp(var);
+}
 
-  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
+static inline TCGv neon_get_scalar(int size, int reg)
+{
+    TCGv tmp;
+    if (size == 1) {
+        tmp = neon_load_reg(reg >> 1, reg & 1);
+    } else {
+        tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
+        if (reg & 1) {
+            gen_neon_dup_low16(tmp);
+        } else {
+            gen_neon_dup_high16(tmp);
+        }
+    }
+    return tmp;
 }
 
-static inline void
-gen_neon_movl_T0_scratch(int scratch)
+static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
 {
-  uint32_t offset;
+    TCGv rd, rm, tmp;
 
-  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
+    rd = new_tmp();
+    rm = new_tmp();
+    tmp = new_tmp();
+
+    tcg_gen_andi_i32(rd, t0, 0xff);
+    tcg_gen_shri_i32(tmp, t0, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff00);
+    tcg_gen_or_i32(rd, rd, tmp);
+    tcg_gen_shli_i32(tmp, t1, 16);
+    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
+    tcg_gen_or_i32(rd, rd, tmp);
+    tcg_gen_shli_i32(tmp, t1, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
+    tcg_gen_or_i32(rd, rd, tmp);
+
+    tcg_gen_shri_i32(rm, t0, 8);
+    tcg_gen_andi_i32(rm, rm, 0xff);
+    tcg_gen_shri_i32(tmp, t0, 16);
+    tcg_gen_andi_i32(tmp, tmp, 0xff00);
+    tcg_gen_or_i32(rm, rm, tmp);
+    tcg_gen_shli_i32(tmp, t1, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
+    tcg_gen_or_i32(rm, rm, tmp);
+    tcg_gen_andi_i32(tmp, t1, 0xff000000);
+    tcg_gen_or_i32(t1, rm, tmp);
+    tcg_gen_mov_i32(t0, rd);
+
+    dead_tmp(tmp);
+    dead_tmp(rm);
+    dead_tmp(rd);
 }
 
-static inline void
-gen_neon_movl_T1_scratch(int scratch)
+static void gen_neon_zip_u8(TCGv t0, TCGv t1)
 {
-  uint32_t offset;
+    TCGv rd, rm, tmp;
+
+    rd = new_tmp();
+    rm = new_tmp();
+    tmp = new_tmp();
+
+    tcg_gen_andi_i32(rd, t0, 0xff);
+    tcg_gen_shli_i32(tmp, t1, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff00);
+    tcg_gen_or_i32(rd, rd, tmp);
+    tcg_gen_shli_i32(tmp, t0, 16);
+    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
+    tcg_gen_or_i32(rd, rd, tmp);
+    tcg_gen_shli_i32(tmp, t1, 24);
+    tcg_gen_andi_i32(tmp, tmp, 0xff000000);
+    tcg_gen_or_i32(rd, rd, tmp);
+
+    tcg_gen_andi_i32(rm, t1, 0xff000000);
+    tcg_gen_shri_i32(tmp, t0, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff0000);
+    tcg_gen_or_i32(rm, rm, tmp);
+    tcg_gen_shri_i32(tmp, t1, 8);
+    tcg_gen_andi_i32(tmp, tmp, 0xff00);
+    tcg_gen_or_i32(rm, rm, tmp);
+    tcg_gen_shri_i32(tmp, t0, 16);
+    tcg_gen_andi_i32(tmp, tmp, 0xff);
+    tcg_gen_or_i32(t1, rm, tmp);
+    tcg_gen_mov_i32(t0, rd);
 
-  offset = offsetof(CPUARMState, vfp.scratch[scratch]);
-  tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
+    dead_tmp(tmp);
+    dead_tmp(rm);
+    dead_tmp(rd);
 }
 
-static inline void gen_neon_get_scalar(int size, int reg)
+static void gen_neon_zip_u16(TCGv t0, TCGv t1)
 {
-    if (size == 1) {
-        NEON_GET_REG(T0, reg >> 1, reg & 1);
-    } else {
-        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
-        if (reg & 1)
-            gen_neon_dup_low16(cpu_T[0]);
-        else
-            gen_neon_dup_high16(cpu_T[0]);
-    }
+    TCGv tmp, tmp2;
+
+    tmp = new_tmp();
+    tmp2 = new_tmp();
+
+    tcg_gen_andi_i32(tmp, t0, 0xffff);
+    tcg_gen_shli_i32(tmp2, t1, 16);
+    tcg_gen_or_i32(tmp, tmp, tmp2);
+    tcg_gen_andi_i32(t1, t1, 0xffff0000);
+    tcg_gen_shri_i32(tmp2, t0, 16);
+    tcg_gen_or_i32(t1, t1, tmp2);
+    tcg_gen_mov_i32(t0, tmp);
+
+    dead_tmp(tmp2);
+    dead_tmp(tmp);
 }
 
 static void gen_neon_unzip(int reg, int q, int tmp, int size)
 {
     int n;
+    TCGv t0, t1;
 
     for (n = 0; n < q + 1; n += 2) {
-        NEON_GET_REG(T0, reg, n);
-        NEON_GET_REG(T0, reg, n + n);
+        t0 = neon_load_reg(reg, n);
+        t1 = neon_load_reg(reg, n + 1);
         switch (size) {
-        case 0: gen_helper_neon_unzip_u8(); break;
-        case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same.  */
+        case 0: gen_neon_unzip_u8(t0, t1); break;
+        case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same.  */
         case 2: /* no-op */; break;
         default: abort();
         }
-        gen_neon_movl_scratch_T0(tmp + n);
-        gen_neon_movl_scratch_T1(tmp + n + 1);
+        neon_store_scratch(tmp + n, t0);
+        neon_store_scratch(tmp + n + 1, t1);
     }
 }
 
+static void gen_neon_trn_u8(TCGv t0, TCGv t1)
+{
+    TCGv rd, tmp;
+
+    rd = new_tmp();
+    tmp = new_tmp();
+
+    tcg_gen_shli_i32(rd, t0, 8);
+    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
+    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
+    tcg_gen_or_i32(rd, rd, tmp);
+
+    tcg_gen_shri_i32(t1, t1, 8);
+    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
+    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
+    tcg_gen_or_i32(t1, t1, tmp);
+    tcg_gen_mov_i32(t0, rd);
+
+    dead_tmp(tmp);
+    dead_tmp(rd);
+}
+
+static void gen_neon_trn_u16(TCGv t0, TCGv t1)
+{
+    TCGv rd, tmp;
+
+    rd = new_tmp();
+    tmp = new_tmp();
+
+    tcg_gen_shli_i32(rd, t0, 16);
+    tcg_gen_andi_i32(tmp, t1, 0xffff);
+    tcg_gen_or_i32(rd, rd, tmp);
+    tcg_gen_shri_i32(t1, t1, 16);
+    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
+    tcg_gen_or_i32(t1, t1, tmp);
+    tcg_gen_mov_i32(t0, rd);
+
+    dead_tmp(tmp);
+    dead_tmp(rd);
+}
+
+
 static struct {
     int nregs;
     int interleave;
@@ -4053,10 +4119,6 @@ static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
         break;
     default: abort();
     }
-    if (size < 2) {
-        dead_tmp(b);
-        dead_tmp(a);
-    }
 }
 
 /* Translate a NEON data processing instruction.  Return nonzero if the
@@ -4189,6 +4251,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             pairwise = 0;
             break;
         }
+
         for (pass = 0; pass < (q ? 4 : 2); pass++) {
 
         if (pairwise) {
@@ -4198,16 +4261,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             else
                 n = 0;
             if (pass < q + 1) {
-                NEON_GET_REG(T0, rn, n);
-                NEON_GET_REG(T1, rn, n + 1);
+                tmp = neon_load_reg(rn, n);
+                tmp2 = neon_load_reg(rn, n + 1);
             } else {
-                NEON_GET_REG(T0, rm, n);
-                NEON_GET_REG(T1, rm, n + 1);
+                tmp = neon_load_reg(rm, n);
+                tmp2 = neon_load_reg(rm, n + 1);
             }
         } else {
             /* Elementwise.  */
-            NEON_GET_REG(T0, rn, pass);
-            NEON_GET_REG(T1, rm, pass);
+            tmp = neon_load_reg(rn, pass);
+            tmp2 = neon_load_reg(rm, pass);
         }
         switch (op) {
         case 0: /* VHADD */
@@ -4222,35 +4285,35 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 3: /* Logic ops.  */
             switch ((u << 2) | size) {
             case 0: /* VAND */
-                gen_op_andl_T0_T1();
+                tcg_gen_and_i32(tmp, tmp, tmp2);
                 break;
             case 1: /* BIC */
-                gen_op_bicl_T0_T1();
+                tcg_gen_bic_i32(tmp, tmp, tmp2);
                 break;
             case 2: /* VORR */
-                gen_op_orl_T0_T1();
+                tcg_gen_or_i32(tmp, tmp, tmp2);
                 break;
             case 3: /* VORN */
-                gen_op_notl_T1();
-                gen_op_orl_T0_T1();
+                tcg_gen_not_i32(tmp2, tmp2);
+                tcg_gen_or_i32(tmp, tmp, tmp2);
                 break;
             case 4: /* VEOR */
-                gen_op_xorl_T0_T1();
+                tcg_gen_xor_i32(tmp, tmp, tmp2);
                 break;
             case 5: /* VBSL */
-                tmp = neon_load_reg(rd, pass);
-                gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
-                dead_tmp(tmp);
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp, tmp2, tmp3);
+                dead_tmp(tmp3);
                 break;
             case 6: /* VBIT */
-                tmp = neon_load_reg(rd, pass);
-                gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
-                dead_tmp(tmp);
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp, tmp3, tmp2);
+                dead_tmp(tmp3);
                 break;
             case 7: /* VBIF */
-                tmp = neon_load_reg(rd, pass);
-                gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
-                dead_tmp(tmp);
+                tmp3 = neon_load_reg(rd, pass);
+                gen_neon_bsl(tmp, tmp3, tmp, tmp2);
+                dead_tmp(tmp3);
                 break;
             }
             break;
@@ -4289,18 +4352,19 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             break;
         case 15: /* VABA */
             GEN_NEON_INTEGER_OP(abd);
-            NEON_GET_REG(T1, rd, pass);
-            gen_neon_add(size);
+            dead_tmp(tmp2);
+            tmp2 = neon_load_reg(rd, pass);
+            gen_neon_add(size, tmp, tmp2);
             break;
         case 16:
             if (!u) { /* VADD */
-                if (gen_neon_add(size))
+                if (gen_neon_add(size, tmp, tmp2))
                     return 1;
             } else { /* VSUB */
                 switch (size) {
-                case 0: gen_helper_neon_sub_u8(CPU_T001); break;
-                case 1: gen_helper_neon_sub_u16(CPU_T001); break;
-                case 2: gen_op_subl_T0_T1(); break;
+                case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
+                case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
                 default: return 1;
                 }
             }
@@ -4308,42 +4372,43 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 17:
             if (!u) { /* VTST */
                 switch (size) {
-                case 0: gen_helper_neon_tst_u8(CPU_T001); break;
-                case 1: gen_helper_neon_tst_u16(CPU_T001); break;
-                case 2: gen_helper_neon_tst_u32(CPU_T001); break;
+                case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
+                case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
                 default: return 1;
                 }
             } else { /* VCEQ */
                 switch (size) {
-                case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
-                case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
-                case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
+                case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+                case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                 default: return 1;
                 }
             }
             break;
         case 18: /* Multiply.  */
             switch (size) {
-            case 0: gen_helper_neon_mul_u8(CPU_T001); break;
-            case 1: gen_helper_neon_mul_u16(CPU_T001); break;
-            case 2: gen_op_mul_T0_T1(); break;
+            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
             default: return 1;
             }
-            NEON_GET_REG(T1, rd, pass);
+            dead_tmp(tmp2);
+            tmp2 = neon_load_reg(rd, pass);
             if (u) { /* VMLS */
-                gen_neon_rsb(size);
+                gen_neon_rsb(size, tmp, tmp2);
             } else { /* VMLA */
-                gen_neon_add(size);
+                gen_neon_add(size, tmp, tmp2);
             }
             break;
         case 19: /* VMUL */
             if (u) { /* polynomial */
-                gen_helper_neon_mul_p8(CPU_T001);
+                gen_helper_neon_mul_p8(tmp, tmp, tmp2);
             } else { /* Integer */
                 switch (size) {
-                case 0: gen_helper_neon_mul_u8(CPU_T001); break;
-                case 1: gen_helper_neon_mul_u16(CPU_T001); break;
-                case 2: gen_op_mul_T0_T1(); break;
+                case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+                case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+                case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                 default: return 1;
                 }
             }
@@ -4357,14 +4422,14 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
         case 22: /* Multiply high.  */
             if (!u) { /* VQDMULH */
                 switch (size) {
-                case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
-                case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
+                case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
+                case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                 default: return 1;
                 }
             } else { /* VQRDMULH */
                 switch (size) {
-                case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
-                case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
+                case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
+                case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
                 default: return 1;
                 }
             }
@@ -4373,88 +4438,91 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             if (u)
                 return 1;
             switch (size) {
-            case 0: gen_helper_neon_padd_u8(CPU_T001); break;
-            case 1: gen_helper_neon_padd_u16(CPU_T001); break;
-            case 2: gen_op_addl_T0_T1(); break;
+            case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
+            case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
+            case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
             default: return 1;
             }
             break;
         case 26: /* Floating point arithmetic.  */
             switch ((u << 2) | size) {
             case 0: /* VADD */
-                gen_helper_neon_add_f32(CPU_T001);
+                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                 break;
             case 2: /* VSUB */
-                gen_helper_neon_sub_f32(CPU_T001);
+                gen_helper_neon_sub_f32(tmp, tmp, tmp2);
                 break;
             case 4: /* VPADD */
-                gen_helper_neon_add_f32(CPU_T001);
+                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                 break;
             case 6: /* VABD */
-                gen_helper_neon_abd_f32(CPU_T001);
+                gen_helper_neon_abd_f32(tmp, tmp, tmp2);
                 break;
             default:
                 return 1;
             }
             break;
         case 27: /* Float multiply.  */
-            gen_helper_neon_mul_f32(CPU_T001);
+            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
             if (!u) {
-                NEON_GET_REG(T1, rd, pass);
+                dead_tmp(tmp2);
+                tmp2 = neon_load_reg(rd, pass);
                 if (size == 0) {
-                    gen_helper_neon_add_f32(CPU_T001);
+                    gen_helper_neon_add_f32(tmp, tmp, tmp2);
                 } else {
-                    gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
+                    gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                 }
             }
             break;
         case 28: /* Float compare.  */
             if (!u) {
-                gen_helper_neon_ceq_f32(CPU_T001);
+                gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
             } else {
                 if (size == 0)
-                    gen_helper_neon_cge_f32(CPU_T001);
+                    gen_helper_neon_cge_f32(tmp, tmp, tmp2);
                 else
-                    gen_helper_neon_cgt_f32(CPU_T001);
+                    gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
             }
             break;
         case 29: /* Float compare absolute.  */
             if (!u)
                 return 1;
             if (size == 0)
-                gen_helper_neon_acge_f32(CPU_T001);
+                gen_helper_neon_acge_f32(tmp, tmp, tmp2);
             else
-                gen_helper_neon_acgt_f32(CPU_T001);
+                gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
             break;
         case 30: /* Float min/max.  */
             if (size == 0)
-                gen_helper_neon_max_f32(CPU_T001);
+                gen_helper_neon_max_f32(tmp, tmp, tmp2);
             else
-                gen_helper_neon_min_f32(CPU_T001);
+                gen_helper_neon_min_f32(tmp, tmp, tmp2);
             break;
         case 31:
             if (size == 0)
-                gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
+                gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
             else
-                gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
+                gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
             break;
         default:
             abort();
         }
+        dead_tmp(tmp2);
+
         /* Save the result.  For elementwise operations we can put it
            straight into the destination register.  For pairwise operations
            we have to be careful to avoid clobbering the source operands.  */
         if (pairwise && rd == rm) {
-            gen_neon_movl_scratch_T0(pass);
+            neon_store_scratch(pass, tmp);
         } else {
-            NEON_SET_REG(T0, rd, pass);
+            neon_store_reg(rd, pass, tmp);
         }
 
         } /* for pass */
         if (pairwise && rd == rm) {
             for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                gen_neon_movl_T0_scratch(pass);
-                NEON_SET_REG(T0, rd, pass);
+                tmp = neon_load_scratch(pass);
+                neon_store_reg(rd, pass, tmp);
             }
         }
         /* End of 3 register same size operations.  */
@@ -4551,8 +4619,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         neon_store_reg64(cpu_V0, rd + pass);
                     } else { /* size < 3 */
                         /* Operands in T0 and T1.  */
-                        gen_op_movl_T1_im(imm);
-                        NEON_GET_REG(T0, rm, pass);
+                        tmp = neon_load_reg(rm, pass);
+                        tmp2 = new_tmp();
+                        tcg_gen_movi_i32(tmp2, imm);
                         switch (op) {
                         case 0:  /* VSHR */
                         case 1:  /* VSRA */
@@ -4569,9 +4638,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             break;
                         case 5: /* VSHL, VSLI */
                             switch (size) {
-                            case 0: gen_helper_neon_shl_u8(CPU_T001); break;
-                            case 1: gen_helper_neon_shl_u16(CPU_T001); break;
-                            case 2: gen_helper_neon_shl_u32(CPU_T001); break;
+                            case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
+                            case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
                             default: return 1;
                             }
                             break;
@@ -4580,18 +4649,20 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             break;
                         case 7: /* VQSHLU */
                             switch (size) {
-                            case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
-                            case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
-                            case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
+                            case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
+                            case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
+                            case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
                             default: return 1;
                             }
                             break;
                         }
+                        dead_tmp(tmp2);
 
                         if (op == 1 || op == 3) {
                             /* Accumulate.  */
-                            NEON_GET_REG(T1, rd, pass);
-                            gen_neon_add(size);
+                            tmp2 = neon_load_reg(rd, pass);
+                            gen_neon_add(size, tmp2, tmp);
+                            dead_tmp(tmp2);
                         } else if (op == 4 || (op == 5 && u)) {
                             /* Insert */
                             switch (size) {
@@ -4619,12 +4690,13 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             default:
                                 abort();
                             }
-                            tmp = neon_load_reg(rd, pass);
-                            tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
-                            tcg_gen_andi_i32(tmp, tmp, ~imm);
-                            tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
+                            tmp2 = neon_load_reg(rd, pass);
+                            tcg_gen_andi_i32(tmp, tmp, imm);
+                            tcg_gen_andi_i32(tmp2, tmp2, ~imm);
+                            tcg_gen_or_i32(tmp, tmp, tmp2);
+                            dead_tmp(tmp2);
                         }
-                        NEON_SET_REG(T0, rd, pass);
+                        neon_store_reg(rd, pass, tmp);
                     }
                 } /* for pass */
             } else if (op < 10) {
@@ -4786,9 +4858,6 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             if (invert)
                 imm = ~imm;
 
-            if (op != 14 || !invert)
-                gen_op_movl_T1_im(imm);
-
             for (pass = 0; pass < (q ? 4 : 2); pass++) {
                 if (op & 1 && op < 12) {
                     tmp = neon_load_reg(rd, pass);
@@ -4855,11 +4924,11 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                    always aligned so will never overlap with wide
                    destinations in problematic ways.  */
                 if (rd == rm && !src2_wide) {
-                    NEON_GET_REG(T0, rm, 1);
-                    gen_neon_movl_scratch_T0(2);
+                    tmp = neon_load_reg(rm, 1);
+                    neon_store_scratch(2, tmp);
                 } else if (rd == rn && !src1_wide) {
-                    NEON_GET_REG(T0, rn, 1);
-                    gen_neon_movl_scratch_T0(2);
+                    tmp = neon_load_reg(rn, 1);
+                    neon_store_scratch(2, tmp);
                 }
                 TCGV_UNUSED(tmp3);
                 for (pass = 0; pass < 2; pass++) {
@@ -4868,9 +4937,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         TCGV_UNUSED(tmp);
                     } else {
                         if (pass == 1 && rd == rn) {
-                            gen_neon_movl_T0_scratch(2);
-                            tmp = new_tmp();
-                            tcg_gen_mov_i32(tmp, cpu_T[0]);
+                            tmp = neon_load_scratch(2);
                         } else {
                             tmp = neon_load_reg(rn, pass);
                         }
@@ -4883,9 +4950,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         TCGV_UNUSED(tmp2);
                     } else {
                         if (pass == 1 && rd == rm) {
-                            gen_neon_movl_T0_scratch(2);
-                            tmp2 = new_tmp();
-                            tcg_gen_mov_i32(tmp2, cpu_T[0]);
+                            tmp2 = neon_load_scratch(2);
                         } else {
                             tmp2 = neon_load_reg(rm, pass);
                         }
@@ -4928,6 +4993,8 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     case 8: case 9: case 10: case 11: case 12: case 13:
                         /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
                         gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+                        dead_tmp(tmp2);
+                        dead_tmp(tmp);
                         break;
                     case 14: /* Polynomial VMULL */
                         cpu_abort(env, "Polynomial VMULL not implemented");
@@ -5016,55 +5083,56 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 case 9: /* Floating point VMUL scalar */
                 case 12: /* VQDMULH scalar */
                 case 13: /* VQRDMULH scalar */
-                    gen_neon_get_scalar(size, rm);
-                    gen_neon_movl_scratch_T0(0);
+                    tmp = neon_get_scalar(size, rm);
+                    neon_store_scratch(0, tmp);
                     for (pass = 0; pass < (u ? 4 : 2); pass++) {
-                        if (pass != 0)
-                            gen_neon_movl_T0_scratch(0);
-                        NEON_GET_REG(T1, rn, pass);
+                        tmp = neon_load_scratch(0);
+                        tmp2 = neon_load_reg(rn, pass);
                         if (op == 12) {
                             if (size == 1) {
-                                gen_helper_neon_qdmulh_s16(CPU_T0E01);
+                                gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
                             } else {
-                                gen_helper_neon_qdmulh_s32(CPU_T0E01);
+                                gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
                             }
                         } else if (op == 13) {
                             if (size == 1) {
-                                gen_helper_neon_qrdmulh_s16(CPU_T0E01);
+                                gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
                             } else {
-                                gen_helper_neon_qrdmulh_s32(CPU_T0E01);
+                                gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
                             }
                         } else if (op & 1) {
-                            gen_helper_neon_mul_f32(CPU_T001);
+                            gen_helper_neon_mul_f32(tmp, tmp, tmp2);
                         } else {
                             switch (size) {
-                            case 0: gen_helper_neon_mul_u8(CPU_T001); break;
-                            case 1: gen_helper_neon_mul_u16(CPU_T001); break;
-                            case 2: gen_op_mul_T0_T1(); break;
+                            case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+                            case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
                             default: return 1;
                             }
                         }
+                        dead_tmp(tmp2);
                         if (op < 8) {
                             /* Accumulate.  */
-                            NEON_GET_REG(T1, rd, pass);
+                            tmp2 = neon_load_reg(rd, pass);
                             switch (op) {
                             case 0:
-                                gen_neon_add(size);
+                                gen_neon_add(size, tmp, tmp2);
                                 break;
                             case 1:
-                                gen_helper_neon_add_f32(CPU_T001);
+                                gen_helper_neon_add_f32(tmp, tmp, tmp2);
                                 break;
                             case 4:
-                                gen_neon_rsb(size);
+                                gen_neon_rsb(size, tmp, tmp2);
                                 break;
                             case 5:
-                                gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
+                                gen_helper_neon_sub_f32(tmp, tmp2, tmp);
                                 break;
                             default:
                                 abort();
                             }
+                            dead_tmp(tmp2);
                         }
-                        NEON_SET_REG(T0, rd, pass);
+                        neon_store_reg(rd, pass, tmp);
                     }
                     break;
                 case 2: /* VMLAL scalar */
@@ -5076,19 +5144,17 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     if (size == 0 && (op == 3 || op == 7 || op == 11))
                         return 1;
 
-                    gen_neon_get_scalar(size, rm);
-                    NEON_GET_REG(T1, rn, 1);
+                    tmp2 = neon_get_scalar(size, rm);
+                    tmp3 = neon_load_reg(rn, 1);
 
                     for (pass = 0; pass < 2; pass++) {
                         if (pass == 0) {
                             tmp = neon_load_reg(rn, 0);
                         } else {
-                            tmp = new_tmp();
-                            tcg_gen_mov_i32(tmp, cpu_T[1]);
+                            tmp = tmp3;
                         }
-                        tmp2 = new_tmp();
-                        tcg_gen_mov_i32(tmp2, cpu_T[0]);
                         gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+                        dead_tmp(tmp);
                         if (op == 6 || op == 7) {
                             gen_neon_negl(cpu_V0, size);
                         }
@@ -5114,6 +5180,9 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         }
                         neon_store_reg64(cpu_V0, rd + pass);
                     }
+
+                    dead_tmp(tmp2);
+
                     break;
                 default: /* 14 and 15 are RESERVED */
                     return 1;
@@ -5180,25 +5249,24 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                     if (size == 3)
                         return 1;
                     for (pass = 0; pass < (q ? 2 : 1); pass++) {
-                        NEON_GET_REG(T0, rm, pass * 2);
-                        NEON_GET_REG(T1, rm, pass * 2 + 1);
+                        tmp = neon_load_reg(rm, pass * 2);
+                        tmp2 = neon_load_reg(rm, pass * 2 + 1);
                         switch (size) {
-                        case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
-                        case 1: gen_swap_half(cpu_T[0]); break;
+                        case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
+                        case 1: gen_swap_half(tmp); break;
                         case 2: /* no-op */ break;
                         default: abort();
                         }
-                        NEON_SET_REG(T0, rd, pass * 2 + 1);
+                        neon_store_reg(rd, pass * 2 + 1, tmp);
                         if (size == 2) {
-                            NEON_SET_REG(T1, rd, pass * 2);
+                            neon_store_reg(rd, pass * 2, tmp2);
                         } else {
-                            gen_op_movl_T0_T1();
                             switch (size) {
-                            case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
-                            case 1: gen_swap_half(cpu_T[0]); break;
+                            case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
+                            case 1: gen_swap_half(tmp2); break;
                             default: abort();
                             }
-                            NEON_SET_REG(T0, rd, pass * 2);
+                            neon_store_reg(rd, pass * 2, tmp2);
                         }
                     }
                     break;
@@ -5228,10 +5296,10 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 case 33: /* VTRN */
                     if (size == 2) {
                         for (n = 0; n < (q ? 4 : 2); n += 2) {
-                            NEON_GET_REG(T0, rm, n);
-                            NEON_GET_REG(T1, rd, n + 1);
-                            NEON_SET_REG(T1, rm, n);
-                            NEON_SET_REG(T0, rd, n + 1);
+                            tmp = neon_load_reg(rm, n);
+                            tmp2 = neon_load_reg(rd, n + 1);
+                            neon_store_reg(rm, n, tmp2);
+                            neon_store_reg(rd, n + 1, tmp);
                         }
                     } else {
                         goto elementwise;
@@ -5251,16 +5319,16 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             {0, 2, 4, 6, 1, 3, 5, 7};
                         for (n = 0; n < 8; n++) {
                             int reg = (n < 4) ? rd : rm;
-                            gen_neon_movl_T0_scratch(unzip_order_q[n]);
-                            NEON_SET_REG(T0, reg, n % 4);
+                            tmp = neon_load_scratch(unzip_order_q[n]);
+                            neon_store_reg(reg, n % 4, tmp);
                         }
                     } else {
                         static int unzip_order[4] =
                             {0, 4, 1, 5};
                         for (n = 0; n < 4; n++) {
                             int reg = (n < 2) ? rd : rm;
-                            gen_neon_movl_T0_scratch(unzip_order[n]);
-                            NEON_SET_REG(T0, reg, n % 2);
+                            tmp = neon_load_scratch(unzip_order[n]);
+                            neon_store_reg(reg, n % 2, tmp);
                         }
                     }
                     break;
@@ -5273,21 +5341,21 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         return 1;
                     count = (q ? 4 : 2);
                     for (n = 0; n < count; n++) {
-                        NEON_GET_REG(T0, rd, n);
-                        NEON_GET_REG(T1, rd, n);
+                        tmp = neon_load_reg(rd, n);
+                        tmp2 = neon_load_reg(rd, n);
                         switch (size) {
-                        case 0: gen_helper_neon_zip_u8(); break;
-                        case 1: gen_helper_neon_zip_u16(); break;
+                        case 0: gen_neon_zip_u8(tmp, tmp2); break;
+                        case 1: gen_neon_zip_u16(tmp, tmp2); break;
                         case 2: /* no-op */; break;
                         default: abort();
                         }
-                        gen_neon_movl_scratch_T0(n * 2);
-                        gen_neon_movl_scratch_T1(n * 2 + 1);
+                        neon_store_scratch(n * 2, tmp);
+                        neon_store_scratch(n * 2 + 1, tmp2);
                     }
                     for (n = 0; n < count * 2; n++) {
                         int reg = (n < count) ? rd : rm;
-                        gen_neon_movl_T0_scratch(n);
-                        NEON_SET_REG(T0, reg, n % count);
+                        tmp = neon_load_scratch(n);
+                        neon_store_reg(reg, n % count, tmp);
                     }
                     break;
                 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
@@ -5330,124 +5398,132 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                         if (op == 30 || op == 31 || op >= 58) {
                             tcg_gen_ld_f32(cpu_F0s, cpu_env,
                                            neon_reg_offset(rm, pass));
+                            TCGV_UNUSED(tmp);
                         } else {
-                            NEON_GET_REG(T0, rm, pass);
+                            tmp = neon_load_reg(rm, pass);
                         }
                         switch (op) {
                         case 1: /* VREV32 */
                             switch (size) {
-                            case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
-                            case 1: gen_swap_half(cpu_T[0]); break;
+                            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
+                            case 1: gen_swap_half(tmp); break;
                             default: return 1;
                             }
                             break;
                         case 2: /* VREV16 */
                             if (size != 0)
                                 return 1;
-                            gen_rev16(cpu_T[0]);
+                            gen_rev16(tmp);
                             break;
                         case 8: /* CLS */
                             switch (size) {
-                            case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
-                            case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
-                            case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
+                            case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
+                            case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
+                            case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
                             default: return 1;
                             }
                             break;
                         case 9: /* CLZ */
                             switch (size) {
-                            case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
-                            case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
-                            case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
+                            case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
+                            case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
+                            case 2: gen_helper_clz(tmp, tmp); break;
                             default: return 1;
                             }
                             break;
                         case 10: /* CNT */
                             if (size != 0)
                                 return 1;
-                            gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
+                            gen_helper_neon_cnt_u8(tmp, tmp);
                             break;
                         case 11: /* VNOT */
                             if (size != 0)
                                 return 1;
-                            gen_op_notl_T0();
+                            tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 14: /* VQABS */
                             switch (size) {
-                            case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
-                            case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
-                            case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
+                            case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
+                            case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
                             default: return 1;
                             }
                             break;
                         case 15: /* VQNEG */
                             switch (size) {
-                            case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
-                            case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
-                            case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
+                            case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
+                            case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
+                            case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
                             default: return 1;
                             }
                             break;
                         case 16: case 19: /* VCGT #0, VCLE #0 */
-                            gen_op_movl_T1_im(0);
+                            tmp2 = tcg_const_i32(0);
                             switch(size) {
-                            case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
-                            case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
-                            case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
+                            case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
+                            case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
                             default: return 1;
                             }
+                            tcg_temp_free(tmp2);
                             if (op == 19)
-                                gen_op_notl_T0();
+                                tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 17: case 20: /* VCGE #0, VCLT #0 */
-                            gen_op_movl_T1_im(0);
+                            tmp2 = tcg_const_i32(0);
                             switch(size) {
-                            case 0: gen_helper_neon_cge_s8(CPU_T001); break;
-                            case 1: gen_helper_neon_cge_s16(CPU_T001); break;
-                            case 2: gen_helper_neon_cge_s32(CPU_T001); break;
+                            case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
+                            case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
                             default: return 1;
                             }
+                            tcg_temp_free(tmp2);
                             if (op == 20)
-                                gen_op_notl_T0();
+                                tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 18: /* VCEQ #0 */
-                            gen_op_movl_T1_im(0);
+                            tmp2 = tcg_const_i32(0);
                             switch(size) {
-                            case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
-                            case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
-                            case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
+                            case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+                            case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+                            case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
                             default: return 1;
                             }
+                            tcg_temp_free(tmp2);
                             break;
                         case 22: /* VABS */
                             switch(size) {
-                            case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
-                            case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
-                            case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
+                            case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
+                            case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
+                            case 2: tcg_gen_abs_i32(tmp, tmp); break;
                             default: return 1;
                             }
                             break;
                         case 23: /* VNEG */
-                            gen_op_movl_T1_im(0);
                             if (size == 3)
                                 return 1;
-                            gen_neon_rsb(size);
+                            tmp2 = tcg_const_i32(0);
+                            gen_neon_rsb(size, tmp, tmp2);
+                            tcg_temp_free(tmp2);
                             break;
                         case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
-                            gen_op_movl_T1_im(0);
-                            gen_helper_neon_cgt_f32(CPU_T001);
+                            tmp2 = tcg_const_i32(0);
+                            gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+                            tcg_temp_free(tmp2);
                             if (op == 27)
-                                gen_op_notl_T0();
+                                tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
-                            gen_op_movl_T1_im(0);
-                            gen_helper_neon_cge_f32(CPU_T001);
+                            tmp2 = tcg_const_i32(0);
+                            gen_helper_neon_cge_f32(tmp, tmp, tmp2);
+                            tcg_temp_free(tmp2);
                             if (op == 28)
-                                gen_op_notl_T0();
+                                tcg_gen_not_i32(tmp, tmp);
                             break;
                         case 26: /* Float VCEQ #0 */
-                            gen_op_movl_T1_im(0);
-                            gen_helper_neon_ceq_f32(CPU_T001);
+                            tmp2 = tcg_const_i32(0);
+                            gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+                            tcg_temp_free(tmp2);
                             break;
                         case 30: /* Float VABS */
                             gen_vfp_abs(0);
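
The compare-against-zero cases in this hunk all follow one shape once cpu_T[1] is gone: the zero operand becomes a throwaway constant temp, the helper overwrites tmp in place, and the constant is freed immediately afterwards. A minimal sketch of that pattern, using the helpers named above and assuming the usual translate.c context (tmp already holds one 32-bit pass of the source register); the name "zero" is purely illustrative:

    /* Sketch: VCGT #0 (case 16), with VCLE #0 (case 19) as its complement.
       "zero" stands in for the old cpu_T[1] = 0.  */
    TCGv zero = tcg_const_i32(0);
    switch (size) {
    case 0: gen_helper_neon_cgt_s8(tmp, tmp, zero); break;
    case 1: gen_helper_neon_cgt_s16(tmp, tmp, zero); break;
    case 2: gen_helper_neon_cgt_s32(tmp, tmp, zero); break;
    default: return 1;               /* size 3 is not a valid element width here */
    }
    tcg_temp_free(zero);             /* the constant is dead once the helper has run */
    if (op == 19)
        tcg_gen_not_i32(tmp, tmp);   /* VCLE #0 is the complement of VCGT #0 */
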
@@ -5456,24 +5532,24 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             gen_vfp_neg(0);
                             break;
                         case 32: /* VSWP */
-                            NEON_GET_REG(T1, rd, pass);
-                            NEON_SET_REG(T1, rm, pass);
+                            tmp2 = neon_load_reg(rd, pass);
+                            neon_store_reg(rm, pass, tmp2);
                             break;
                         case 33: /* VTRN */
-                            NEON_GET_REG(T1, rd, pass);
+                            tmp2 = neon_load_reg(rd, pass);
                             switch (size) {
-                            case 0: gen_helper_neon_trn_u8(); break;
-                            case 1: gen_helper_neon_trn_u16(); break;
+                            case 0: gen_neon_trn_u8(tmp, tmp2); break;
+                            case 1: gen_neon_trn_u16(tmp, tmp2); break;
                             case 2: abort();
                             default: return 1;
                             }
-                            NEON_SET_REG(T1, rm, pass);
+                            neon_store_reg(rm, pass, tmp2);
                             break;
                         case 56: /* Integer VRECPE */
-                            gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
+                            gen_helper_recpe_u32(tmp, tmp, cpu_env);
                             break;
                         case 57: /* Integer VRSQRTE */
-                            gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
+                            gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
                             break;
                         case 58: /* Float VRECPE */
                             gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
@@ -5501,7 +5577,7 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                             tcg_gen_st_f32(cpu_F0s, cpu_env,
                                            neon_reg_offset(rd, pass));
                         } else {
-                            NEON_SET_REG(T0, rd, pass);
+                            neon_store_reg(rd, pass, tmp);
                         }
                     }
                     break;
@@ -5534,21 +5610,24 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
             } else if ((insn & 0x380) == 0) {
                 /* VDUP */
                 if (insn & (1 << 19)) {
-                    NEON_SET_REG(T0, rm, 1);
+                    tmp = neon_load_reg(rm, 1);
                 } else {
-                    NEON_SET_REG(T0, rm, 0);
+                    tmp = neon_load_reg(rm, 0);
                 }
                 if (insn & (1 << 16)) {
-                    gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
+                    gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
                 } else if (insn & (1 << 17)) {
                     if ((insn >> 18) & 1)
-                        gen_neon_dup_high16(cpu_T[0]);
+                        gen_neon_dup_high16(tmp);
                     else
-                        gen_neon_dup_low16(cpu_T[0]);
+                        gen_neon_dup_low16(tmp);
                 }
                 for (pass = 0; pass < (q ? 4 : 2); pass++) {
-                    NEON_SET_REG(T0, rd, pass);
+                    tmp2 = new_tmp();
+                    tcg_gen_mov_i32(tmp2, tmp);
+                    neon_store_reg(rd, pass, tmp2);
                 }
+                dead_tmp(tmp);
             } else {
                 return 1;
             }
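
One detail of the VDUP loop above that is easy to miss: neon_store_reg() consumes (frees) the temp it is handed, so the broadcast value cannot be stored directly on every pass. Each destination pass therefore gets its own copy, and the scalar itself is released once after the loop. A sketch of just that pattern, in the same translate.c context:

    /* Broadcast the scalar in tmp to every pass of the destination.  */
    for (pass = 0; pass < (q ? 4 : 2); pass++) {
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        neon_store_reg(rd, pass, tmp2);   /* tmp2 now belongs to neon_store_reg */
    }
    dead_tmp(tmp);                        /* the original scalar is no longer needed */
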
@@ -5778,7 +5857,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             }
         } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
             /* srs */
-            uint32_t offset;
+            int32_t offset;
             if (IS_USER(s))
                 goto illegal_op;
             ARCH(6);
@@ -5792,8 +5871,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             i = (insn >> 23) & 3;
             switch (i) {
             case 0: offset = -4; break; /* DA */
-            case 1: offset = -8; break; /* DB */
-            case 2: offset = 0; break; /* IA */
+            case 1: offset = 0; break; /* IA */
+            case 2: offset = -8; break; /* DB */
             case 3: offset = 4; break; /* IB */
             default: abort();
             }
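
The reordered cases above are correct because i = (insn >> 23) & 3 packs the U bit (23) into bit 0 and the P bit (24) into bit 1, so the four values mean DA, IA, DB, IB in that order; with two words transferred (LR then SPSR), the lower word lands at Rn-4, Rn, Rn-8 and Rn+4 respectively. A small standalone check of that mapping; srs_rfe_first_offset is a hypothetical helper written only for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* i = ((insn >> 23) & 3) is {P,U}: 0 = DA, 1 = IA, 2 = DB, 3 = IB.
       Return the offset, relative to Rn, of the lower of the two words
       that SRS/RFE transfer.  */
    static int32_t srs_rfe_first_offset(int i)
    {
        switch (i) {
        case 0: return -4;  /* DA: Rn-4 .. Rn   */
        case 1: return 0;   /* IA: Rn   .. Rn+4 */
        case 2: return -8;  /* DB: Rn-8 .. Rn-4 */
        case 3: return 4;   /* IB: Rn+4 .. Rn+8 */
        default: return 0;
        }
    }

    int main(void)
    {
        assert(srs_rfe_first_offset(1) == 0);    /* IA starts at Rn */
        assert(srs_rfe_first_offset(2) == -8);   /* DB starts two words below Rn */
        return 0;
    }
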
@@ -5801,32 +5880,32 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 tcg_gen_addi_i32(addr, addr, offset);
             tmp = load_reg(s, 14);
             gen_st32(tmp, addr, 0);
-            tmp = new_tmp();
-            gen_helper_cpsr_read(tmp);
+            tmp = load_cpu_field(spsr);
             tcg_gen_addi_i32(addr, addr, 4);
             gen_st32(tmp, addr, 0);
             if (insn & (1 << 21)) {
                 /* Base writeback.  */
                 switch (i) {
                 case 0: offset = -8; break;
-                case 1: offset = -4; break;
-                case 2: offset = 4; break;
+                case 1: offset = 4; break;
+                case 2: offset = -4; break;
                 case 3: offset = 0; break;
                 default: abort();
                 }
                 if (offset)
-                    tcg_gen_addi_i32(addr, tmp, offset);
+                    tcg_gen_addi_i32(addr, addr, offset);
                 if (op1 == (env->uncached_cpsr & CPSR_M)) {
-                    gen_movl_reg_T1(s, 13);
+                    store_reg(s, 13, addr);
                 } else {
-                    gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
+                    gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr);
+                    dead_tmp(addr);
                 }
             } else {
                 dead_tmp(addr);
             }
         } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
             /* rfe */
-            uint32_t offset;
+            int32_t offset;
             if (IS_USER(s))
                 goto illegal_op;
             ARCH(6);
@@ -5835,8 +5914,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
             i = (insn >> 23) & 3;
             switch (i) {
             case 0: offset = -4; break; /* DA */
-            case 1: offset = -8; break; /* DB */
-            case 2: offset = 0; break; /* IA */
+            case 1: offset = 0; break; /* IA */
+            case 2: offset = -8; break; /* DB */
             case 3: offset = 4; break; /* IB */
             default: abort();
             }
@@ -5850,8 +5929,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 /* Base writeback.  */
                 switch (i) {
                 case 0: offset = -8; break;
-                case 1: offset = -4; break;
-                case 2: offset = 4; break;
+                case 1: offset = 4; break;
+                case 2: offset = -4; break;
                 case 3: offset = 0; break;
                 default: abort();
                 }
@@ -5862,6 +5941,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 dead_tmp(addr);
             }
             gen_rfe(s, tmp, tmp2);
+            return;
         } else if ((insn & 0x0e000000) == 0x0a000000) {
             /* branch link and change to thumb (blx <offset>) */
             int32_t offset;
@@ -5911,8 +5991,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 val |= (insn & 0x1f);
             }
             if (mask) {
-                gen_op_movl_T0_im(val);
-                gen_set_psr_T0(s, mask, 0);
+                gen_set_psr_im(s, mask, 0, val);
             }
             return;
         }
@@ -5952,9 +6031,8 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                 shift = ((insn >> 8) & 0xf) * 2;
                 if (shift)
                     val = (val >> shift) | (val << (32 - shift));
-                gen_op_movl_T0_im(val);
                 i = ((insn & (1 << 22)) != 0);
-                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
+                if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
                     goto illegal_op;
             }
         }
@@ -5968,9 +6046,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
         case 0x0: /* move program status register */
             if (op1 & 1) {
                 /* PSR = reg */
-                gen_movl_T0_reg(s, rm);
+                tmp = load_reg(s, rm);
                 i = ((op1 & 2) != 0);
-                if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
+                if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
                     goto illegal_op;
             } else {
                 /* reg = PSR */
@@ -6341,10 +6419,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                             ARCH(6K);
                         else
                             ARCH(6);
-                        gen_movl_T1_reg(s, rn);
-                        addr = cpu_T[1];
+                        addr = tcg_temp_local_new_i32();
+                        tcg_gen_mov_i32(addr, cpu_R[rn]);
                         if (insn & (1 << 20)) {
-                            gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                            gen_helper_mark_exclusive(cpu_env, addr);
                             switch (op1) {
                             case 0: /* ldrex */
                                 tmp = gen_ld32(addr, IS_USER(s));
@@ -6369,9 +6447,9 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                         } else {
                             int label = gen_new_label();
                             rm = insn & 0xf;
-                            gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
-                            tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
-                                                0, label);
+                            tmp2 = tcg_temp_local_new_i32();
+                            gen_helper_test_exclusive(tmp2, cpu_env, addr);
+                            tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
                             tmp = load_reg(s,rm);
                             switch (op1) {
                             case 0:  /*  strex */
@@ -6393,8 +6471,10 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
                                 abort();
                             }
                             gen_set_label(label);
-                            gen_movl_reg_T0(s, rd);
+                            tcg_gen_mov_i32(cpu_R[rd], tmp2);
+                            tcg_temp_free(tmp2);
                         }
+                        tcg_temp_free(addr);
                     } else {
                         /* SWP instruction */
                         rm = (insn) & 0xf;
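
The reason addr and the strex status use tcg_temp_local_new_i32() rather than ordinary temps is spelled out in the comments this patch removes from the Thumb-2 path further down: plain TCG temporaries do not keep their value across a branch, and the store-exclusive sequence branches over the guarded store. A condensed sketch of the sequence converted above, in the same context, with the ownership noted:

    /* Values needed on both sides of the brcond must live in local temps.  */
    addr = tcg_temp_local_new_i32();
    tcg_gen_mov_i32(addr, cpu_R[rn]);

    int label = gen_new_label();
    tmp2 = tcg_temp_local_new_i32();
    gen_helper_test_exclusive(tmp2, cpu_env, addr);   /* 0 if the monitor still holds */
    tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label); /* failure: skip the store */
    tmp = load_reg(s, rm);
    gen_st32(tmp, addr, IS_USER(s));                  /* the guarded store */
    gen_set_label(label);
    tcg_gen_mov_i32(cpu_R[rd], tmp2);                 /* Rd = status on either path */
    tcg_temp_free(tmp2);
    tcg_temp_free(addr);
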
@@ -6950,70 +7030,70 @@ thumb2_logic_op(int op)
    Returns zero if the opcode is valid.  */
 
 static int
-gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
+gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
 {
     int logic_cc;
 
     logic_cc = 0;
     switch (op) {
     case 0: /* and */
-        gen_op_andl_T0_T1();
+        tcg_gen_and_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 1: /* bic */
-        gen_op_bicl_T0_T1();
+        tcg_gen_bic_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 2: /* orr */
-        gen_op_orl_T0_T1();
+        tcg_gen_or_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 3: /* orn */
-        gen_op_notl_T1();
-        gen_op_orl_T0_T1();
+        tcg_gen_not_i32(t1, t1);
+        tcg_gen_or_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 4: /* eor */
-        gen_op_xorl_T0_T1();
+        tcg_gen_xor_i32(t0, t0, t1);
         logic_cc = conds;
         break;
     case 8: /* add */
         if (conds)
-            gen_op_addl_T0_T1_cc();
+            gen_helper_add_cc(t0, t0, t1);
         else
-            gen_op_addl_T0_T1();
+            tcg_gen_add_i32(t0, t0, t1);
         break;
     case 10: /* adc */
         if (conds)
-            gen_op_adcl_T0_T1_cc();
+            gen_helper_adc_cc(t0, t0, t1);
         else
-            gen_adc_T0_T1();
+            gen_adc(t0, t1);
         break;
     case 11: /* sbc */
         if (conds)
-            gen_op_sbcl_T0_T1_cc();
+            gen_helper_sbc_cc(t0, t0, t1);
         else
-            gen_sbc_T0_T1();
+            gen_sub_carry(t0, t0, t1);
         break;
     case 13: /* sub */
         if (conds)
-            gen_op_subl_T0_T1_cc();
+            gen_helper_sub_cc(t0, t0, t1);
         else
-            gen_op_subl_T0_T1();
+            tcg_gen_sub_i32(t0, t0, t1);
         break;
     case 14: /* rsb */
         if (conds)
-            gen_op_rsbl_T0_T1_cc();
+            gen_helper_sub_cc(t0, t1, t0);
         else
-            gen_op_rsbl_T0_T1();
+            tcg_gen_sub_i32(t0, t1, t0);
         break;
     default: /* 5, 6, 7, 9, 12, 15. */
         return 1;
     }
     if (logic_cc) {
-        gen_op_logic_T0_cc();
+        gen_logic_CC(t0);
         if (shifter_out)
-            gen_set_CF_bit31(cpu_T[1]);
+            gen_set_CF_bit31(t1);
     }
     return 0;
 }
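
With the operands passed in explicitly, the calling convention for gen_thumb2_data_op() becomes: the caller loads or builds both temps, the result comes back in t0, and the caller releases whatever it does not store. A sketch of the caller side, mirroring the register-shift path converted below (rn == 15 would substitute a fresh zero temp for the first operand):

    tmp = load_reg(s, rn);                    /* first operand */
    tmp2 = load_reg(s, rm);                   /* second operand, shifted by the caller */
    gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
    if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
        goto illegal_op;
    dead_tmp(tmp2);                           /* t1 is never written back */
    if (rd != 15) {
        store_reg(s, rd, tmp);                /* store_reg() frees tmp itself */
    } else {
        dead_tmp(tmp);                        /* rd == 15: flag-setting form, no write-back */
    }
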
@@ -7069,8 +7149,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
               16-bit instructions in case the second half causes a
                prefetch abort.  */
             offset = ((int32_t)insn << 21) >> 9;
-            gen_op_movl_T0_im(s->pc + 2 + offset);
-            gen_movl_reg_T0(s, 14);
+            tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
             return 0;
         }
         /* Fall through to 32-bit decode.  */
@@ -7136,22 +7215,24 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                 }
             } else if ((insn & (1 << 23)) == 0) {
                 /* Load/store exclusive word.  */
-                gen_movl_T1_reg(s, rn);
-                addr = cpu_T[1];
+                addr = tcg_temp_local_new();
+                tcg_gen_mov_i32(addr, cpu_R[rn]);
                 if (insn & (1 << 20)) {
-                    gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
+                    gen_helper_mark_exclusive(cpu_env, addr);
                     tmp = gen_ld32(addr, IS_USER(s));
                     store_reg(s, rd, tmp);
                 } else {
                     int label = gen_new_label();
-                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
-                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
-                                        0, label);
+                    tmp2 = tcg_temp_local_new();
+                    gen_helper_test_exclusive(tmp2, cpu_env, addr);
+                    tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
                     tmp = load_reg(s, rs);
-                    gen_st32(tmp, cpu_T[1], IS_USER(s));
+                    gen_st32(tmp, addr, IS_USER(s));
                     gen_set_label(label);
-                    gen_movl_reg_T0(s, rd);
+                    tcg_gen_mov_i32(cpu_R[rd], tmp2);
+                    tcg_temp_free(tmp2);
                 }
+                tcg_temp_free(addr);
             } else if ((insn & (1 << 6)) == 0) {
                 /* Table Branch.  */
                 if (rn == 15) {
@@ -7181,10 +7262,8 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                    we never have multiple CPUs running in parallel,
                    so it is good enough.  */
                 op = (insn >> 4) & 0x3;
-                /* Must use a global reg for the address because we have
-                   a conditional branch in the store instruction.  */
-                gen_movl_T1_reg(s, rn);
-                addr = cpu_T[1];
+                addr = tcg_temp_local_new();
+                tcg_gen_mov_i32(addr, cpu_R[rn]);
                 if (insn & (1 << 20)) {
                     gen_helper_mark_exclusive(cpu_env, addr);
                     switch (op) {
@@ -7206,9 +7285,9 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                     store_reg(s, rs, tmp);
                 } else {
                     int label = gen_new_label();
-                    /* Must use a global that is not killed by the branch.  */
-                    gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
-                    tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
+                    tmp2 = tcg_temp_local_new();
+                    gen_helper_test_exclusive(tmp2, cpu_env, addr);
+                    tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
                     tmp = load_reg(s, rs);
                     switch (op) {
                     case 0:
@@ -7227,8 +7306,10 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                         goto illegal_op;
                     }
                     gen_set_label(label);
-                    gen_movl_reg_T0(s, rm);
+                    tcg_gen_mov_i32(cpu_R[rm], tmp2);
+                    tcg_temp_free(tmp2);
                 }
+                tcg_temp_free(addr);
             }
         } else {
             /* Load/store multiple, RFE, SRS.  */
@@ -7338,21 +7419,27 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
         }
         break;
     case 5: /* Data processing register constant shift.  */
-        if (rn == 15)
-            gen_op_movl_T0_im(0);
-        else
-            gen_movl_T0_reg(s, rn);
-        gen_movl_T1_reg(s, rm);
+        if (rn == 15) {
+            tmp = new_tmp();
+            tcg_gen_movi_i32(tmp, 0);
+        } else {
+            tmp = load_reg(s, rn);
+        }
+        tmp2 = load_reg(s, rm);
         op = (insn >> 21) & 0xf;
         shiftop = (insn >> 4) & 3;
         shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
         conds = (insn & (1 << 20)) != 0;
         logic_cc = (conds && thumb2_logic_op(op));
-        gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
-        if (gen_thumb2_data_op(s, op, conds, 0))
+        gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
+        if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
             goto illegal_op;
-        if (rd != 15)
-            gen_movl_reg_T0(s, rd);
+        dead_tmp(tmp2);
+        if (rd != 15) {
+            store_reg(s, rd, tmp);
+        } else {
+            dead_tmp(tmp);
+        }
         break;
     case 13: /* Misc data processing.  */
         op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
@@ -7639,8 +7726,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
 
                 if (insn & (1 << 14)) {
                     /* Branch and link.  */
-                    gen_op_movl_T1_im(s->pc | 1);
-                    gen_movl_reg_T1(s, 14);
+                    tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
                 }
 
                 offset += s->pc;
@@ -7675,10 +7761,10 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                     case 1: /* msr spsr.  */
                         if (IS_M(env))
                             goto illegal_op;
-                        gen_movl_T0_reg(s, rn);
-                        if (gen_set_psr_T0(s,
+                        tmp = load_reg(s, rn);
+                        if (gen_set_psr(s,
                               msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
-                              op == 1))
+                              op == 1, tmp))
                             goto illegal_op;
                         break;
                     case 2: /* cps, nop-hint.  */
@@ -7705,8 +7791,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                             imm |= (insn & 0x1f);
                         }
                         if (offset) {
-                            gen_op_movl_T0_im(imm);
-                            gen_set_psr_T0(s, offset, 0);
+                            gen_set_psr_im(s, offset, 0, imm);
                         }
                         break;
                     case 3: /* Special control operations.  */
@@ -7904,19 +7989,25 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
                     shifter_out = 1;
                     break;
                 }
-                gen_op_movl_T1_im(imm);
+                tmp2 = new_tmp();
+                tcg_gen_movi_i32(tmp2, imm);
                 rn = (insn >> 16) & 0xf;
-                if (rn == 15)
-                    gen_op_movl_T0_im(0);
-                else
-                    gen_movl_T0_reg(s, rn);
+                if (rn == 15) {
+                    tmp = new_tmp();
+                    tcg_gen_movi_i32(tmp, 0);
+                } else {
+                    tmp = load_reg(s, rn);
+                }
                 op = (insn >> 21) & 0xf;
                 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
-                                       shifter_out))
+                                       shifter_out, tmp, tmp2))
                     goto illegal_op;
+                dead_tmp(tmp2);
                 rd = (insn >> 8) & 0xf;
                 if (rd != 15) {
-                    gen_movl_reg_T0(s, rd);
+                    store_reg(s, rd, tmp);
+                } else {
+                    dead_tmp(tmp);
                 }
             }
         }
@@ -8060,32 +8151,35 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
 
     switch (insn >> 12) {
     case 0: case 1:
+
         rd = insn & 7;
         op = (insn >> 11) & 3;
         if (op == 3) {
             /* add/subtract */
             rn = (insn >> 3) & 7;
-            gen_movl_T0_reg(s, rn);
+            tmp = load_reg(s, rn);
             if (insn & (1 << 10)) {
                 /* immediate */
-                gen_op_movl_T1_im((insn >> 6) & 7);
+                tmp2 = new_tmp();
+                tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
             } else {
                 /* reg */
                 rm = (insn >> 6) & 7;
-                gen_movl_T1_reg(s, rm);
+                tmp2 = load_reg(s, rm);
             }
             if (insn & (1 << 9)) {
                 if (s->condexec_mask)
-                    gen_op_subl_T0_T1();
+                    tcg_gen_sub_i32(tmp, tmp, tmp2);
                 else
-                    gen_op_subl_T0_T1_cc();
+                    gen_helper_sub_cc(tmp, tmp, tmp2);
             } else {
                 if (s->condexec_mask)
-                    gen_op_addl_T0_T1();
+                    tcg_gen_add_i32(tmp, tmp, tmp2);
                 else
-                    gen_op_addl_T0_T1_cc();
+                    gen_helper_add_cc(tmp, tmp, tmp2);
             }
-            gen_movl_reg_T0(s, rd);
+            dead_tmp(tmp2);
+            store_reg(s, rd, tmp);
         } else {
             /* shift immediate */
             rm = (insn >> 3) & 7;
@@ -8101,35 +8195,40 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
         /* arithmetic large immediate */
         op = (insn >> 11) & 3;
         rd = (insn >> 8) & 0x7;
-        if (op == 0) {
-            gen_op_movl_T0_im(insn & 0xff);
-        } else {
-            gen_movl_T0_reg(s, rd);
-            gen_op_movl_T1_im(insn & 0xff);
-        }
-        switch (op) {
-        case 0: /* mov */
+        if (op == 0) { /* mov */
+            tmp = new_tmp();
+            tcg_gen_movi_i32(tmp, insn & 0xff);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
-            break;
-        case 1: /* cmp */
-            gen_op_subl_T0_T1_cc();
-            break;
-        case 2: /* add */
-            if (s->condexec_mask)
-                gen_op_addl_T0_T1();
-            else
-                gen_op_addl_T0_T1_cc();
-            break;
-        case 3: /* sub */
-            if (s->condexec_mask)
-                gen_op_subl_T0_T1();
-            else
-                gen_op_subl_T0_T1_cc();
-            break;
+                gen_logic_CC(tmp);
+            store_reg(s, rd, tmp);
+        } else {
+            tmp = load_reg(s, rd);
+            tmp2 = new_tmp();
+            tcg_gen_movi_i32(tmp2, insn & 0xff);
+            switch (op) {
+            case 1: /* cmp */
+                gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp);
+                dead_tmp(tmp2);
+                break;
+            case 2: /* add */
+                if (s->condexec_mask)
+                    tcg_gen_add_i32(tmp, tmp, tmp2);
+                else
+                    gen_helper_add_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
+                break;
+            case 3: /* sub */
+                if (s->condexec_mask)
+                    tcg_gen_sub_i32(tmp, tmp, tmp2);
+                else
+                    gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
+                break;
+            }
         }
-        if (op != 1)
-            gen_movl_reg_T0(s, rd);
         break;
     case 4:
         if (insn & (1 << 11)) {
@@ -8151,19 +8250,22 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
             op = (insn >> 8) & 3;
             switch (op) {
             case 0: /* add */
-                gen_movl_T0_reg(s, rd);
-                gen_movl_T1_reg(s, rm);
-                gen_op_addl_T0_T1();
-                gen_movl_reg_T0(s, rd);
+                tmp = load_reg(s, rd);
+                tmp2 = load_reg(s, rm);
+                tcg_gen_add_i32(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                store_reg(s, rd, tmp);
                 break;
             case 1: /* cmp */
-                gen_movl_T0_reg(s, rd);
-                gen_movl_T1_reg(s, rm);
-                gen_op_subl_T0_T1_cc();
+                tmp = load_reg(s, rd);
+                tmp2 = load_reg(s, rm);
+                gen_helper_sub_cc(tmp, tmp, tmp2);
+                dead_tmp(tmp2);
+                dead_tmp(tmp);
                 break;
             case 2: /* mov/cpy */
-                gen_movl_T0_reg(s, rm);
-                gen_movl_reg_T0(s, rd);
+                tmp = load_reg(s, rm);
+                store_reg(s, rd, tmp);
                 break;
             case 3:/* branch [and link] exchange thumb register */
                 tmp = load_reg(s, rm);
@@ -8193,114 +8295,125 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
             val = 0;
         }
 
-        if (op == 9) /* neg */
-            gen_op_movl_T0_im(0);
-        else if (op != 0xf) /* mvn doesn't read its first operand */
-            gen_movl_T0_reg(s, rd);
+        if (op == 9) { /* neg */
+            tmp = new_tmp();
+            tcg_gen_movi_i32(tmp, 0);
+        } else if (op != 0xf) { /* mvn doesn't read its first operand */
+            tmp = load_reg(s, rd);
+        } else {
+            TCGV_UNUSED(tmp);
+        }
 
-        gen_movl_T1_reg(s, rm);
+        tmp2 = load_reg(s, rm);
         switch (op) {
         case 0x0: /* and */
-            gen_op_andl_T0_T1();
+            tcg_gen_and_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0x1: /* eor */
-            gen_op_xorl_T0_T1();
+            tcg_gen_xor_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0x2: /* lsl */
             if (s->condexec_mask) {
-                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_shl(tmp2, tmp2, tmp);
             } else {
-                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_shl_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x3: /* lsr */
             if (s->condexec_mask) {
-                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_shr(tmp2, tmp2, tmp);
             } else {
-                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_shr_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x4: /* asr */
             if (s->condexec_mask) {
-                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_sar(tmp2, tmp2, tmp);
             } else {
-                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_sar_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x5: /* adc */
             if (s->condexec_mask)
-                gen_adc_T0_T1();
+                gen_adc(tmp, tmp2);
             else
-                gen_op_adcl_T0_T1_cc();
+                gen_helper_adc_cc(tmp, tmp, tmp2);
             break;
         case 0x6: /* sbc */
             if (s->condexec_mask)
-                gen_sbc_T0_T1();
+                gen_sub_carry(tmp, tmp, tmp2);
             else
-                gen_op_sbcl_T0_T1_cc();
+                gen_helper_sbc_cc(tmp, tmp, tmp2);
             break;
         case 0x7: /* ror */
             if (s->condexec_mask) {
-                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
+                gen_helper_ror(tmp2, tmp2, tmp);
             } else {
-                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
-                gen_op_logic_T1_cc();
+                gen_helper_ror_cc(tmp2, tmp2, tmp);
+                gen_logic_CC(tmp2);
             }
             break;
         case 0x8: /* tst */
-            gen_op_andl_T0_T1();
-            gen_op_logic_T0_cc();
+            tcg_gen_and_i32(tmp, tmp, tmp2);
+            gen_logic_CC(tmp);
             rd = 16;
             break;
         case 0x9: /* neg */
             if (s->condexec_mask)
-                tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
+                tcg_gen_neg_i32(tmp, tmp2);
             else
-                gen_op_subl_T0_T1_cc();
+                gen_helper_sub_cc(tmp, tmp, tmp2);
             break;
         case 0xa: /* cmp */
-            gen_op_subl_T0_T1_cc();
+            gen_helper_sub_cc(tmp, tmp, tmp2);
             rd = 16;
             break;
         case 0xb: /* cmn */
-            gen_op_addl_T0_T1_cc();
+            gen_helper_add_cc(tmp, tmp, tmp2);
             rd = 16;
             break;
         case 0xc: /* orr */
-            gen_op_orl_T0_T1();
+            tcg_gen_or_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xd: /* mul */
-            gen_op_mull_T0_T1();
+            gen_mull(tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xe: /* bic */
-            gen_op_bicl_T0_T1();
+            tcg_gen_bic_i32(tmp, tmp, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T0_cc();
+                gen_logic_CC(tmp);
             break;
         case 0xf: /* mvn */
-            gen_op_notl_T1();
+            tcg_gen_not_i32(tmp2, tmp2);
             if (!s->condexec_mask)
-                gen_op_logic_T1_cc();
+                gen_logic_CC(tmp2);
             val = 1;
             rm = rd;
             break;
         }
         if (rd != 16) {
-            if (val)
-                gen_movl_reg_T1(s, rm);
-            else
-                gen_movl_reg_T0(s, rd);
+            if (val) {
+                store_reg(s, rm, tmp2);
+                if (op != 0xf)
+                    dead_tmp(tmp);
+            } else {
+                store_reg(s, rd, tmp);
+                dead_tmp(tmp2);
+            }
+        } else {
+            dead_tmp(tmp);
+            dead_tmp(tmp2);
         }
         break;
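
The write-back tail of this ALU group is where the temp ownership gets settled: load_reg() hands the caller a temp it owns, store_reg() takes ownership of whatever it is given, and anything neither stored nor passed on must go through dead_tmp() so the num_temps accounting stays balanced. Most operations leave their result in tmp; the register-controlled shifts and mvn leave it in tmp2 (signalled by val = 1, with rm redirected to rd for mvn), and tst/cmp/cmn set rd = 16 to mean flags only. The new tail above, restated with comments:

    if (rd != 16) {                        /* something to write back */
        if (val) {                         /* result ended up in tmp2 (shifts, mvn) */
            store_reg(s, rm, tmp2);        /* rm was redirected to rd for mvn */
            if (op != 0xf)
                dead_tmp(tmp);             /* mvn never allocated a first operand */
        } else {
            store_reg(s, rd, tmp);
            dead_tmp(tmp2);
        }
    } else {                               /* tst/cmp/cmn: flags only */
        dead_tmp(tmp);
        dead_tmp(tmp2);
    }
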
 
@@ -8594,10 +8707,7 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
                     shift = CPSR_A | CPSR_I | CPSR_F;
                 else
                     shift = 0;
-
-                val = ((insn & 7) << 6) & shift;
-                gen_op_movl_T0_im(val);
-                gen_set_psr_T0(s, shift, 0);
+                gen_set_psr_im(s, shift, 0, ((insn & 7) << 6) & shift);
             }
             break;
 
@@ -8650,7 +8760,6 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
         s->condlabel = gen_new_label();
         gen_test_cc(cond ^ 1, s->condlabel);
         s->condjmp = 1;
-        gen_movl_T1_reg(s, 15);
 
         /* jump to the offset */
         val = (uint32_t)s->pc + 2;
@@ -8710,7 +8819,6 @@ static inline void gen_intermediate_code_internal(CPUState *env,
 
     /* generate intermediate code */
     num_temps = 0;
-    memset(temps, 0, sizeof(temps));
 
     pc_start = tb->pc;