]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/arm/tcg-target.c
tcg-arm: Cleanup most primitive load store subroutines
[mirror_qemu.git] / tcg / arm / tcg-target.c
CommitLineData
811d4cf4
AZ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
d4a9eb1f 24
ac34fb5c
AJ
25#if defined(__ARM_ARCH_7__) || \
26 defined(__ARM_ARCH_7A__) || \
27 defined(__ARM_ARCH_7EM__) || \
28 defined(__ARM_ARCH_7M__) || \
29 defined(__ARM_ARCH_7R__)
30#define USE_ARMV7_INSTRUCTIONS
31#endif
32
33#if defined(USE_ARMV7_INSTRUCTIONS) || \
34 defined(__ARM_ARCH_6J__) || \
35 defined(__ARM_ARCH_6K__) || \
36 defined(__ARM_ARCH_6T2__) || \
37 defined(__ARM_ARCH_6Z__) || \
38 defined(__ARM_ARCH_6ZK__)
39#define USE_ARMV6_INSTRUCTIONS
40#endif
41
42#if defined(USE_ARMV6_INSTRUCTIONS) || \
43 defined(__ARM_ARCH_5T__) || \
44 defined(__ARM_ARCH_5TE__) || \
45 defined(__ARM_ARCH_5TEJ__)
46#define USE_ARMV5_INSTRUCTIONS
47#endif
48
49#ifdef USE_ARMV5_INSTRUCTIONS
50static const int use_armv5_instructions = 1;
51#else
52static const int use_armv5_instructions = 0;
53#endif
54#undef USE_ARMV5_INSTRUCTIONS
55
56#ifdef USE_ARMV6_INSTRUCTIONS
57static const int use_armv6_instructions = 1;
58#else
59static const int use_armv6_instructions = 0;
60#endif
61#undef USE_ARMV6_INSTRUCTIONS
62
63#ifdef USE_ARMV7_INSTRUCTIONS
64static const int use_armv7_instructions = 1;
65#else
66static const int use_armv7_instructions = 0;
67#endif
68#undef USE_ARMV7_INSTRUCTIONS
69
d4a9eb1f
BS
70#ifndef NDEBUG
71static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
811d4cf4
AZ
72 "%r0",
73 "%r1",
74 "%r2",
75 "%r3",
76 "%r4",
77 "%r5",
78 "%r6",
79 "%r7",
80 "%r8",
81 "%r9",
82 "%r10",
83 "%r11",
84 "%r12",
85 "%r13",
86 "%r14",
e4a7d5e8 87 "%pc",
811d4cf4 88};
d4a9eb1f 89#endif
811d4cf4 90
d4a9eb1f 91static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
92 TCG_REG_R4,
93 TCG_REG_R5,
94 TCG_REG_R6,
95 TCG_REG_R7,
96 TCG_REG_R8,
97 TCG_REG_R9,
98 TCG_REG_R10,
99 TCG_REG_R11,
811d4cf4 100 TCG_REG_R13,
914ccf51
AJ
101 TCG_REG_R0,
102 TCG_REG_R1,
103 TCG_REG_R2,
104 TCG_REG_R3,
105 TCG_REG_R12,
811d4cf4
AZ
106 TCG_REG_R14,
107};
108
d4a9eb1f 109static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
110 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111};
d4a9eb1f 112static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
113 TCG_REG_R0, TCG_REG_R1
114};
115
13dd6fb9 116#define TCG_REG_TMP TCG_REG_R12
4346457a 117
c69806ab
AJ
/* Apply an R_ARM_ABS32 relocation: store the absolute target address
   directly into the 32-bit slot at code_ptr. */
static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
{
    *(uint32_t *) code_ptr = target;
}
122
/* Apply an R_ARM_PC24 relocation (B/BL): rewrite the 24-bit signed word
   offset of the branch at code_ptr so it reaches 'target'.  The +8
   accounts for the ARM pipeline: PC reads as instruction address + 8. */
static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
{
    uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);

    /* Preserve the condition and opcode bits; replace only the offset.  */
    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
130
/* Resolve a relocation of ELF 'type' against the instruction at code_ptr.
   Only R_ARM_ABS32 and R_ARM_PC24 are ever emitted by this backend;
   any other type is a fatal internal error.  'addend' is unused here.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}
149
b6b24cb0
RH
150#define TCG_CT_CONST_ARM 0x100
151#define TCG_CT_CONST_INV 0x200
152#define TCG_CT_CONST_NEG 0x400
153#define TCG_CT_CONST_ZERO 0x800
19b62bf4 154
/* Parse one target-specific operand-constraint letter from *pct_str into
   'ct', advancing the string on success.  Returns 0, or -1 for an
   unknown letter.  The qemu_ld/st letters ('l', 'L', 's', 'S') carve
   registers out of the allocatable set that the softmmu slow path and
   byte-swapping code will clobber.  */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':                       /* rotated 8-bit ALU immediate */
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':                       /* immediate encodable after inversion */
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here. */
        ct->ct |= TCG_CT_CONST_NEG; /* immediate encodable after negation */
        break;
    case 'Z':                       /* constant zero */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':                       /* any core register */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0 and r1 will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if TARGET_LONG_BITS == 64
        /* If we're passing env to the helper as r0 and need a regpair
         * for the address then r2 will be overwritten as we're setting
         * up the args to the helper.
         */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#endif
#endif
        break;
    case 'L':                       /* qemu_ld data register(s) */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r1 is still needed to load data_reg or data_reg2,
           so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU) && (TARGET_LONG_BITS == 64)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        break;
    /* qemu_st64 data_reg2 */
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0 and r1 will be overwritten when reading the tlb entry
           (softmmu only) and doing the byte swapping, so don't
           use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#ifdef CONFIG_SOFTMMU
        /* r2 is still needed to load data_reg, so don't use it. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}
250
94953e6d
LD
/* Rotate 'val' left by n bits, 0 <= n <= 31.
   Masking the right-shift count with 31 avoids the undefined behaviour
   of shifting a 32-bit value by 32 when n == 0 — encode_imm legitimately
   returns a rotation of 0, which tcg_out_movi32 feeds straight back in.
   For n == 0 the result is simply 'val'; all other counts unchanged.  */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> ((32 - n) & 31));
}
255
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30.

   Return the even rotate-left count that brings IMM into the low 8 bits
   (i.e. imm == ror(imm8, ret)), or -1 if IMM cannot be encoded.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations: the only candidates left are
       values wrapping around the top of the word, reachable by a
       small left-rotation of 2, 4 or 6 bits.  */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
cb4e581f
LD
279
/* True iff IMM is representable as a rotated 8-bit ALU immediate.  */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) != -1;
}
284
811d4cf4
AZ
285/* Test if a constant matches the constraint.
286 * TODO: define constraints for:
287 *
288 * ldr/str offset: between -0xfff and 0xfff
289 * ldrh/strh offset: between -0xff and 0xff
290 * mov operand2: values represented with x << (2 * y), x < 0x100
291 * add, sub, eor...: ditto
292 */
293static inline int tcg_target_const_match(tcg_target_long val,
19b62bf4 294 const TCGArgConstraint *arg_ct)
811d4cf4
AZ
295{
296 int ct;
297 ct = arg_ct->ct;
19b62bf4 298 if (ct & TCG_CT_CONST) {
811d4cf4 299 return 1;
19b62bf4 300 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
cb4e581f 301 return 1;
19b62bf4
RH
302 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
303 return 1;
a9a86ae9
RH
304 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
305 return 1;
b6b24cb0
RH
306 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
307 return 1;
19b62bf4 308 } else {
811d4cf4 309 return 0;
19b62bf4 310 }
811d4cf4
AZ
311}
312
2df3f1ee
RH
313#define TO_CPSR (1 << 20)
314
9feac1d7 315typedef enum {
2df3f1ee
RH
316 ARITH_AND = 0x0 << 21,
317 ARITH_EOR = 0x1 << 21,
318 ARITH_SUB = 0x2 << 21,
319 ARITH_RSB = 0x3 << 21,
320 ARITH_ADD = 0x4 << 21,
321 ARITH_ADC = 0x5 << 21,
322 ARITH_SBC = 0x6 << 21,
323 ARITH_RSC = 0x7 << 21,
324 ARITH_TST = 0x8 << 21 | TO_CPSR,
325 ARITH_CMP = 0xa << 21 | TO_CPSR,
326 ARITH_CMN = 0xb << 21 | TO_CPSR,
327 ARITH_ORR = 0xc << 21,
328 ARITH_MOV = 0xd << 21,
329 ARITH_BIC = 0xe << 21,
330 ARITH_MVN = 0xf << 21,
9feac1d7
RH
331
332 INSN_LDR_IMM = 0x04100000,
333 INSN_LDR_REG = 0x06100000,
334 INSN_STR_IMM = 0x04000000,
335 INSN_STR_REG = 0x06000000,
336
337 INSN_LDRH_IMM = 0x005000b0,
338 INSN_LDRH_REG = 0x001000b0,
339 INSN_LDRSH_IMM = 0x005000f0,
340 INSN_LDRSH_REG = 0x001000f0,
341 INSN_STRH_IMM = 0x004000b0,
342 INSN_STRH_REG = 0x000000b0,
343
344 INSN_LDRB_IMM = 0x04500000,
345 INSN_LDRB_REG = 0x06500000,
346 INSN_LDRSB_IMM = 0x005000d0,
347 INSN_LDRSB_REG = 0x001000d0,
348 INSN_STRB_IMM = 0x04400000,
349 INSN_STRB_REG = 0x06400000,
350} ARMInsn;
811d4cf4 351
811d4cf4
AZ
352#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
353#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
354#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
355#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
356#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
357#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
358#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
359#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
360
361enum arm_cond_code_e {
362 COND_EQ = 0x0,
363 COND_NE = 0x1,
364 COND_CS = 0x2, /* Unsigned greater or equal */
365 COND_CC = 0x3, /* Unsigned less than */
366 COND_MI = 0x4, /* Negative */
367 COND_PL = 0x5, /* Zero or greater */
368 COND_VS = 0x6, /* Overflow */
369 COND_VC = 0x7, /* No overflow */
370 COND_HI = 0x8, /* Unsigned greater than */
371 COND_LS = 0x9, /* Unsigned less or equal */
372 COND_GE = 0xa,
373 COND_LT = 0xb,
374 COND_GT = 0xc,
375 COND_LE = 0xd,
376 COND_AL = 0xe,
377};
378
0aed257f 379static const uint8_t tcg_cond_to_arm_cond[] = {
811d4cf4
AZ
380 [TCG_COND_EQ] = COND_EQ,
381 [TCG_COND_NE] = COND_NE,
382 [TCG_COND_LT] = COND_LT,
383 [TCG_COND_GE] = COND_GE,
384 [TCG_COND_LE] = COND_LE,
385 [TCG_COND_GT] = COND_GT,
386 /* unsigned */
387 [TCG_COND_LTU] = COND_CC,
388 [TCG_COND_GEU] = COND_CS,
389 [TCG_COND_LEU] = COND_LS,
390 [TCG_COND_GTU] = COND_HI,
391};
392
/* Emit "bx rn" — branch (and interworking exchange) to the address in rn. */
static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}
397
/* Emit "b <offset>"; offset is relative to this instruction, so subtract
   the 8-byte pipeline bias before encoding the 24-bit word offset.  */
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
403
e936243a
AZ
/* Emit a branch whose 24-bit offset will be patched later (reloc_pc24).
   Only the condition/opcode byte is written; the offset bytes are
   skipped, not stored.  */
static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes. This ensure that caches and memory are
       kept coherent during retranslation. */
#ifdef HOST_WORDS_BIGENDIAN
    tcg_out8(s, (cond << 4) | 0x0a);
    s->code_ptr += 3;
#else
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
#endif
}
417
811d4cf4
AZ
/* Emit "bl <offset>" — branch and link, offset biased by the pipeline +8. */
static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}
423
23401b58
AJ
/* Emit "blx rn" — call through register, with interworking. */
static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}
428
24e838b7
PM
/* Emit immediate-form "blx <offset>" (always unconditional); bit 1 of
   the Thumb target lands in the H bit (bit 24).  */
static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
              (((offset - 8) >> 2) & 0x00ffffff));
}
434
811d4cf4
AZ
/* Emit a register-form data-processing insn: <opc> rd, rn, rm, <shift>.
   'shift' is a pre-built SHIFT_IMM_*/SHIFT_REG_* operand-2 field.  */
static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}
441
9716ef3b
PM
/* Register-to-register move, omitting the instruction entirely when
   source and destination already coincide.  */
static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}
449
811d4cf4
AZ
/* Emit an immediate-form data-processing insn: <opc> rd, rn, #imm.
   'im' must already be a valid rotated-immediate field (imm8 | rot<<7). */
static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}
456
/* Load the 32-bit constant 'arg' into rd.  Strategy, in order:
   single mov/mvn with a rotated immediate; movw(+movt) on ARMv7;
   otherwise a mov/mvn followed by eor's that fill in 8 bits at a time. */
static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, opc, rn;

    /* For armv7, make sure not to use movw+movt when mov/mvn would do.
       Speed things up by only checking when movt would be required.
       Prior to armv7, have one go at fully rotated immediates before
       doing the decomposition thing below.  */
    if (!use_armv7_instructions || (arg & 0xffff0000)) {
        rot = encode_imm(arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                            rotl(arg, rot) | (rot << 7));
            return;
        }
        rot = encode_imm(~arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                            rotl(~arg, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* TODO: This is very suboptimal, we can easily have a constant
       pool somewhere after all the instructions.  */
    opc = ARITH_MOV;
    rn = 0;
    /* If we have lots of leading 1's, we can shorten the sequence by
       beginning with mvn and then clearing higher bits with eor.  */
    if (clz32(~arg) > clz32(arg)) {
        opc = ARITH_MVN, arg = ~arg;
    }
    /* Emit one 8-bit chunk per iteration, aligned to an even bit
       position so it fits the rotated-immediate encoding.  */
    do {
        int i = ctz32(arg) & ~1;
        rot = ((32 - i) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
        arg &= ~(0xff << i);

        opc = ARITH_EOR;
        rn = rd;
    } while (arg);
}
512
7fc645bf
PM
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
   rhs must satisfy the "rI" constraint (directly encodable when const). */
static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        assert(rot >= 0);       /* constraint guarantees encodability */
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
527
19b62bf4
RH
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
   rhs must satisfy the "rIK" constraint: if the constant is not directly
   encodable, its bitwise inverse is, and we switch to 'opinv'.  */
static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
548
a9a86ae9
RH
/* Emit either the reg,imm or reg,reg form of a data-processing insn.
   rhs must satisfy the "rIN" constraint: if the constant is not directly
   encodable, its negation is, and we switch to 'opneg' (e.g. add/sub). */
static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
569
34358a12
RH
/* Emit "mul rd, rn, rm".  On pre-ARMv6 cores, mul with rd == rn is
   UNPREDICTABLE, so swap operands (mul is commutative) or stage one
   input through TCG_REG_TMP.  */
static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first. */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}
587
34358a12
RH
/* Emit "umull rd0, rd1, rn, rm" — 64-bit unsigned multiply, low half in
   rd0, high half in rd1.  Pre-ARMv6, a destination overlapping rn is
   UNPREDICTABLE; swap operands or stage rn through TCG_REG_TMP.  */
static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
606
34358a12
RH
/* Emit "smull rd0, rd1, rn, rm" — 64-bit signed multiply, low half in
   rd0, high half in rd1.  Same pre-ARMv6 UNPREDICTABLE workaround as
   tcg_out_umull32.  */
static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}
625
0637c56c
RH
/* Emit "sdiv rd, rn, rm".  NOTE(review): requires hardware integer
   divide support — presumably guarded by the caller; confirm against
   the opcode table.  */
static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
630
/* Emit "udiv rd, rn, rm".  Same hardware-divide caveat as tcg_out_sdiv. */
static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}
635
9517094f
AJ
/* Sign-extend the low 8 bits of rn into rd: sxtb on ARMv6+,
   else a lsl #24 / asr #24 pair.  */
static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}
649
e854b6d3
AJ
/* Zero-extend the low 8 bits of rn into rd: "and rd, rn, #0xff". */
static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}
655
9517094f
AJ
/* Sign-extend the low 16 bits of rn into rd: sxth on ARMv6+,
   else a lsl #16 / asr #16 pair.  */
static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}
669
/* Zero-extend the low 16 bits of rn into rd: uxth on ARMv6+,
   else a lsl #16 / lsr #16 pair.  */
static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}
683
67dcab73
AJ
/* Byte-swap the low 16 bits of rn and sign-extend into rd: revsh on
   ARMv6+, else synthesized with shifts and an orr.  */
static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
698
244b1e81
AJ
/* Byte-swap the low 16 bits of rn into rd (zero-extended result):
   rev16 on ARMv6+, else synthesized with shifts and an orr.  */
static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}
713
7aab08aa
AJ
/* swap the two low bytes assuming that the two high input bytes and the
   two high output bit can hold any value.  Used on the store path where
   only the low 16 bits reach memory.  */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}
729
244b1e81
AJ
/* Byte-swap all 32 bits of rn into rd: rev on ARMv6+, else the classic
   eor/bic/ror three-step sequence via TCG_REG_TMP.  */
static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
746
b6b24cb0
RH
/* Report whether the backend can implement a deposit of 'len' bits at
   offset 'ofs' natively (via bfi/bfc); true only on ARMv7.  */
bool tcg_target_deposit_valid(int ofs, int len)
{
    /* ??? Without bfi, we could improve over generic code by combining
       the right-shift from a non-zero ofs with the orr.  We do run into
       problems when rd == rs, and the mask generated from ofs+len doesn't
       fit into an immediate.  We would have to be careful not to pessimize
       wrt the optimizations performed on the expanded code.  */
    return use_armv7_instructions;
}
756
/* Emit "bfi rd, a1, #ofs, #len" (bit-field insert).  A constant source
   must be zero (TCG_CT_CONST_ZERO); rn == 15 turns the encoding into
   bfc, which clears the field.  */
static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15. */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}
768
9feac1d7
RH
/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point.
   u/p/w are the add-offset, pre-index and writeback bits.  */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}
777
/* Emit a halfword/signed-byte format memory op with an 8-bit immediate
   offset, split into imm4H:imm4L.  A negative offset is expressed by
   clearing the U (add) bit.  */
static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}
789
/* Emit a word/unsigned-byte format memory op with a 12-bit immediate
   offset.  A negative offset is expressed by clearing the U (add) bit. */
static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
801
/* ldr rt, [rn, #imm12] */
static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}
807
d17bd1d8 808/* Offset pre-increment with base writeback. */
9feac1d7
RH
809static inline void tcg_out_ld32_12wb(TCGContext *s, int cond, TCGReg rt,
810 TCGReg rn, int imm12)
d17bd1d8
AJ
811{
812 /* ldr with writeback and both register equals is UNPREDICTABLE */
813 assert(rd != rn);
9feac1d7 814 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 1);
d17bd1d8
AJ
815}
816
9feac1d7
RH
/* str rt, [rn, #imm12] */
static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}
822
9feac1d7
RH
/* ldr rt, [rn, rm] */
static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}
828
9feac1d7
RH
/* str rt, [rn, rm] */
static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}
834
/* Register pre-increment with base writeback: ldr rt, [rn, rm]! */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}
841
9feac1d7
RH
/* str rt, [rn, rm]! — register pre-increment with base writeback. */
static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}
847
9feac1d7
RH
/* ldrh rt, [rn, #imm8] */
static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}
853
9feac1d7
RH
/* strh rt, [rn, #imm8] */
static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}
859
9feac1d7
RH
/* ldrh rt, [rn, rm] */
static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}
865
9feac1d7
RH
/* strh rt, [rn, rm] */
static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}
871
9feac1d7
RH
/* ldrsh rt, [rn, #imm8] */
static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}
877
9feac1d7
RH
/* ldrsh rt, [rn, rm] */
static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}
883
9feac1d7
RH
/* ldrb rt, [rn, #imm12] */
static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}
889
9feac1d7
RH
/* strb rt, [rn, #imm12] */
static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}
895
9feac1d7
RH
/* ldrb rt, [rn, rm] */
static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}
901
9feac1d7
RH
/* strb rt, [rn, rm] */
static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}
907
9feac1d7
RH
/* ldrsb rt, [rn, #imm8] */
static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}
913
9feac1d7
RH
/* ldrsb rt, [rn, rm] */
static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}
919
811d4cf4
AZ
920static inline void tcg_out_ld32u(TCGContext *s, int cond,
921 int rd, int rn, int32_t offset)
922{
923 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
924 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
925 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
926 } else
927 tcg_out_ld32_12(s, cond, rd, rn, offset);
928}
929
930static inline void tcg_out_st32(TCGContext *s, int cond,
931 int rd, int rn, int32_t offset)
932{
933 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
934 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
935 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
936 } else
937 tcg_out_st32_12(s, cond, rd, rn, offset);
938}
939
940static inline void tcg_out_ld16u(TCGContext *s, int cond,
941 int rd, int rn, int32_t offset)
942{
943 if (offset > 0xff || offset < -0xff) {
4346457a
RH
944 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
945 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
946 } else
947 tcg_out_ld16u_8(s, cond, rd, rn, offset);
948}
949
950static inline void tcg_out_ld16s(TCGContext *s, int cond,
951 int rd, int rn, int32_t offset)
952{
953 if (offset > 0xff || offset < -0xff) {
4346457a
RH
954 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
955 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
956 } else
957 tcg_out_ld16s_8(s, cond, rd, rn, offset);
958}
959
f694a27e 960static inline void tcg_out_st16(TCGContext *s, int cond,
811d4cf4
AZ
961 int rd, int rn, int32_t offset)
962{
963 if (offset > 0xff || offset < -0xff) {
4346457a
RH
964 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
965 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4 966 } else
f694a27e 967 tcg_out_st16_8(s, cond, rd, rn, offset);
811d4cf4
AZ
968}
969
970static inline void tcg_out_ld8u(TCGContext *s, int cond,
971 int rd, int rn, int32_t offset)
972{
973 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
974 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
975 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
976 } else
977 tcg_out_ld8_12(s, cond, rd, rn, offset);
978}
979
980static inline void tcg_out_ld8s(TCGContext *s, int cond,
981 int rd, int rn, int32_t offset)
982{
983 if (offset > 0xff || offset < -0xff) {
4346457a
RH
984 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
985 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
986 } else
987 tcg_out_ld8s_8(s, cond, rd, rn, offset);
988}
989
f694a27e 990static inline void tcg_out_st8(TCGContext *s, int cond,
811d4cf4
AZ
991 int rd, int rn, int32_t offset)
992{
993 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
994 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
995 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
996 } else
997 tcg_out_st8_12(s, cond, rd, rn, offset);
998}
999
222f23f5 1000/* The _goto case is normally between TBs within the same code buffer,
5c84bd90 1001 * and with the code buffer limited to 16MB we shouldn't need the long
222f23f5
DDAG
1002 * case.
1003 *
1004 * .... except to the prologue that is in its own buffer.
1005 */
811d4cf4
AZ
1006static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
1007{
1008 int32_t val;
1009
24e838b7
PM
1010 if (addr & 1) {
1011 /* goto to a Thumb destination isn't supported */
1012 tcg_abort();
1013 }
1014
811d4cf4
AZ
1015 val = addr - (tcg_target_long) s->code_ptr;
1016 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
1017 tcg_out_b(s, cond, val);
1018 else {
811d4cf4 1019 if (cond == COND_AL) {
c8d80cef 1020 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
222f23f5 1021 tcg_out32(s, addr);
811d4cf4 1022 } else {
4346457a 1023 tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
811d4cf4 1024 tcg_out_dat_reg(s, cond, ARITH_ADD,
c8d80cef 1025 TCG_REG_PC, TCG_REG_PC,
4346457a 1026 TCG_REG_TMP, SHIFT_IMM_LSL(0));
811d4cf4 1027 }
811d4cf4
AZ
1028 }
1029}
1030
222f23f5
DDAG
1031/* The call case is mostly used for helpers - so it's not unreasonable
1032 * for them to be beyond branch range */
24e838b7 1033static inline void tcg_out_call(TCGContext *s, uint32_t addr)
811d4cf4
AZ
1034{
1035 int32_t val;
1036
811d4cf4 1037 val = addr - (tcg_target_long) s->code_ptr;
24e838b7
PM
1038 if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
1039 if (addr & 1) {
1040 /* Use BLX if the target is in Thumb mode */
1041 if (!use_armv5_instructions) {
1042 tcg_abort();
1043 }
1044 tcg_out_blx_imm(s, val);
1045 } else {
1046 tcg_out_bl(s, COND_AL, val);
1047 }
1048 } else {
222f23f5
DDAG
1049 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
1050 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1051 tcg_out32(s, addr);
811d4cf4 1052 }
811d4cf4
AZ
1053}
1054
1055static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
1056{
23401b58
AJ
1057 if (use_armv5_instructions) {
1058 tcg_out_blx(s, cond, arg);
1059 } else {
1060 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
1061 TCG_REG_PC, SHIFT_IMM_LSL(0));
1062 tcg_out_bx(s, cond, arg);
1063 }
811d4cf4
AZ
1064}
1065
1066static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
1067{
1068 TCGLabel *l = &s->labels[label_index];
1069
1070 if (l->has_value)
1071 tcg_out_goto(s, cond, l->u.value);
1072 else if (cond == COND_AL) {
c8d80cef 1073 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
1074 tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
1075 s->code_ptr += 4;
1076 } else {
1077 /* Probably this should be preferred even for COND_AL... */
1078 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
e936243a 1079 tcg_out_b_noaddr(s, cond);
811d4cf4
AZ
1080 }
1081}
1082
811d4cf4 1083#ifdef CONFIG_SOFTMMU
79383c9c 1084
022c62cb 1085#include "exec/softmmu_defs.h"
811d4cf4 1086
e141ab52
BS
1087/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1088 int mmu_idx) */
1089static const void * const qemu_ld_helpers[4] = {
1090 helper_ldb_mmu,
1091 helper_ldw_mmu,
1092 helper_ldl_mmu,
1093 helper_ldq_mmu,
1094};
1095
1096/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1097 uintxx_t val, int mmu_idx) */
1098static const void * const qemu_st_helpers[4] = {
1099 helper_stb_mmu,
1100 helper_stw_mmu,
1101 helper_stl_mmu,
1102 helper_stq_mmu,
1103};
9716ef3b
PM
1104
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);                      \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}
1128
1129DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
4346457a 1130 (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
fc4d60ee 1131DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
4346457a 1132 (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
fc4d60ee 1133DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
4346457a 1134 (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
fc4d60ee
RH
1135DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
1136
1137static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
1138 TCGReg arglo, TCGReg arghi)
9716ef3b
PM
1139{
1140 /* 64 bit arguments must go in even/odd register pairs
1141 * and in 8-aligned stack slots.
1142 */
1143 if (argreg & 1) {
1144 argreg++;
1145 }
1146 argreg = tcg_out_arg_reg32(s, argreg, arglo);
1147 argreg = tcg_out_arg_reg32(s, argreg, arghi);
1148 return argreg;
1149}
fc4d60ee 1150#endif /* SOFTMMU */
811d4cf4 1151
3979144c
PB
1152#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
1153
7e0d9562 1154static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
811d4cf4 1155{
67dcab73 1156 int addr_reg, data_reg, data_reg2, bswap;
811d4cf4 1157#ifdef CONFIG_SOFTMMU
d17bd1d8 1158 int mem_index, s_bits, tlb_offset;
9716ef3b 1159 TCGReg argreg;
811d4cf4
AZ
1160# if TARGET_LONG_BITS == 64
1161 int addr_reg2;
1162# endif
811d4cf4 1163 uint32_t *label_ptr;
811d4cf4
AZ
1164#endif
1165
67dcab73
AJ
1166#ifdef TARGET_WORDS_BIGENDIAN
1167 bswap = 1;
1168#else
1169 bswap = 0;
1170#endif
811d4cf4
AZ
1171 data_reg = *args++;
1172 if (opc == 3)
1173 data_reg2 = *args++;
1174 else
d89c682f 1175 data_reg2 = 0; /* suppress warning */
811d4cf4 1176 addr_reg = *args++;
811d4cf4 1177#ifdef CONFIG_SOFTMMU
aef3a282
AZ
1178# if TARGET_LONG_BITS == 64
1179 addr_reg2 = *args++;
1180# endif
811d4cf4
AZ
1181 mem_index = *args;
1182 s_bits = opc & 3;
1183
91a3c1b0 1184 /* Should generate something like the following:
3979144c 1185 * shr r8, addr_reg, #TARGET_PAGE_BITS
91a3c1b0 1186 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
3979144c 1187 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
91a3c1b0
AZ
1188 */
1189# if CPU_TLB_BITS > 8
1190# error
1191# endif
4346457a 1192 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
c8d80cef 1193 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
811d4cf4 1194 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
4346457a 1195 TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
c8d80cef
AJ
1196 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
1197 TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
d17bd1d8
AJ
1198 /* We assume that the offset is contained within 20 bits. */
1199 tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
5256a720 1200 assert((tlb_offset & ~0xfffff) == 0);
d17bd1d8 1201 if (tlb_offset > 0xfff) {
c8d80cef 1202 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
d17bd1d8
AJ
1203 0xa00 | (tlb_offset >> 12));
1204 tlb_offset &= 0xfff;
1205 }
1206 tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
c8d80cef 1207 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
4346457a 1208 TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
3979144c
PB
1209 /* Check alignment. */
1210 if (s_bits)
1211 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1212 0, addr_reg, (1 << s_bits) - 1);
811d4cf4 1213# if TARGET_LONG_BITS == 64
d17bd1d8
AJ
1214 /* XXX: possibly we could use a block data load in the first access. */
1215 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
c8d80cef
AJ
1216 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1217 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
811d4cf4 1218# endif
c8d80cef 1219 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
d17bd1d8
AJ
1220 offsetof(CPUTLBEntry, addend)
1221 - offsetof(CPUTLBEntry, addr_read));
811d4cf4
AZ
1222
1223 switch (opc) {
1224 case 0:
c8d80cef 1225 tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
1226 break;
1227 case 0 | 4:
c8d80cef 1228 tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
1229 break;
1230 case 1:
c8d80cef 1231 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
67dcab73
AJ
1232 if (bswap) {
1233 tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
1234 }
811d4cf4
AZ
1235 break;
1236 case 1 | 4:
67dcab73
AJ
1237 if (bswap) {
1238 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1239 tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
1240 } else {
1241 tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1242 }
811d4cf4
AZ
1243 break;
1244 case 2:
1245 default:
c8d80cef 1246 tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
67dcab73
AJ
1247 if (bswap) {
1248 tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1249 }
811d4cf4
AZ
1250 break;
1251 case 3:
67dcab73
AJ
1252 if (bswap) {
1253 tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
1254 tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
1255 tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
1256 tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
1257 } else {
1258 tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1259 tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1260 }
811d4cf4
AZ
1261 break;
1262 }
1263
1264 label_ptr = (void *) s->code_ptr;
c69806ab 1265 tcg_out_b_noaddr(s, COND_EQ);
811d4cf4 1266
811d4cf4 1267 /* TODO: move this code to where the constants pool will be */
9716ef3b
PM
1268 /* Note that this code relies on the constraints we set in arm_op_defs[]
1269 * to ensure that later arguments are not passed to us in registers we
1270 * trash by moving the earlier arguments into them.
1271 */
1272 argreg = TCG_REG_R0;
9716ef3b 1273 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
9716ef3b
PM
1274#if TARGET_LONG_BITS == 64
1275 argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
1276#else
1277 argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
e141ab52 1278#endif
9716ef3b 1279 argreg = tcg_out_arg_imm32(s, argreg, mem_index);
24e838b7 1280 tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);
811d4cf4
AZ
1281
1282 switch (opc) {
1283 case 0 | 4:
e854b6d3 1284 tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
811d4cf4
AZ
1285 break;
1286 case 1 | 4:
e854b6d3 1287 tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
811d4cf4
AZ
1288 break;
1289 case 0:
1290 case 1:
1291 case 2:
1292 default:
f97713ff 1293 tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
811d4cf4
AZ
1294 break;
1295 case 3:
f97713ff
PM
1296 tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
1297 tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
811d4cf4
AZ
1298 break;
1299 }
1300
c69806ab 1301 reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
379f6698
PB
1302#else /* !CONFIG_SOFTMMU */
1303 if (GUEST_BASE) {
1304 uint32_t offset = GUEST_BASE;
1305 int i;
1306 int rot;
1307
1308 while (offset) {
1309 i = ctz32(offset) & ~1;
1310 rot = ((32 - i) << 7) & 0xf00;
1311
4346457a 1312 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
379f6698 1313 ((offset >> i) & 0xff) | rot);
4346457a 1314 addr_reg = TCG_REG_TMP;
379f6698
PB
1315 offset &= ~(0xff << i);
1316 }
1317 }
811d4cf4
AZ
1318 switch (opc) {
1319 case 0:
1320 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1321 break;
1322 case 0 | 4:
1323 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1324 break;
1325 case 1:
1326 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
67dcab73
AJ
1327 if (bswap) {
1328 tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1329 }
811d4cf4
AZ
1330 break;
1331 case 1 | 4:
67dcab73
AJ
1332 if (bswap) {
1333 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1334 tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1335 } else {
1336 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1337 }
811d4cf4
AZ
1338 break;
1339 case 2:
1340 default:
1341 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
67dcab73
AJ
1342 if (bswap) {
1343 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1344 }
811d4cf4
AZ
1345 break;
1346 case 3:
eae6ce52
AZ
1347 /* TODO: use block load -
1348 * check that data_reg2 > data_reg or the other way */
419bafa5 1349 if (data_reg == addr_reg) {
67dcab73
AJ
1350 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1351 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
419bafa5 1352 } else {
67dcab73
AJ
1353 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1354 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1355 }
1356 if (bswap) {
1357 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1358 tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
419bafa5 1359 }
811d4cf4
AZ
1360 break;
1361 }
1362#endif
1363}
1364
7e0d9562 1365static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
811d4cf4 1366{
67dcab73 1367 int addr_reg, data_reg, data_reg2, bswap;
811d4cf4 1368#ifdef CONFIG_SOFTMMU
d17bd1d8 1369 int mem_index, s_bits, tlb_offset;
9716ef3b 1370 TCGReg argreg;
811d4cf4
AZ
1371# if TARGET_LONG_BITS == 64
1372 int addr_reg2;
1373# endif
811d4cf4 1374 uint32_t *label_ptr;
811d4cf4
AZ
1375#endif
1376
67dcab73
AJ
1377#ifdef TARGET_WORDS_BIGENDIAN
1378 bswap = 1;
1379#else
1380 bswap = 0;
1381#endif
811d4cf4
AZ
1382 data_reg = *args++;
1383 if (opc == 3)
1384 data_reg2 = *args++;
1385 else
d89c682f 1386 data_reg2 = 0; /* suppress warning */
811d4cf4 1387 addr_reg = *args++;
811d4cf4 1388#ifdef CONFIG_SOFTMMU
aef3a282
AZ
1389# if TARGET_LONG_BITS == 64
1390 addr_reg2 = *args++;
1391# endif
811d4cf4
AZ
1392 mem_index = *args;
1393 s_bits = opc & 3;
1394
91a3c1b0 1395 /* Should generate something like the following:
3979144c 1396 * shr r8, addr_reg, #TARGET_PAGE_BITS
91a3c1b0 1397 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
3979144c 1398 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
91a3c1b0 1399 */
811d4cf4 1400 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
4346457a 1401 TCG_REG_TMP, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
811d4cf4 1402 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
4346457a 1403 TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
c8d80cef
AJ
1404 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
1405 TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
d17bd1d8
AJ
1406 /* We assume that the offset is contained within 20 bits. */
1407 tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
5256a720 1408 assert((tlb_offset & ~0xfffff) == 0);
d17bd1d8 1409 if (tlb_offset > 0xfff) {
c8d80cef 1410 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
d17bd1d8
AJ
1411 0xa00 | (tlb_offset >> 12));
1412 tlb_offset &= 0xfff;
1413 }
1414 tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
c8d80cef 1415 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
4346457a 1416 TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
3979144c
PB
1417 /* Check alignment. */
1418 if (s_bits)
1419 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1420 0, addr_reg, (1 << s_bits) - 1);
811d4cf4 1421# if TARGET_LONG_BITS == 64
d17bd1d8
AJ
1422 /* XXX: possibly we could use a block data load in the first access. */
1423 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
c8d80cef
AJ
1424 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1425 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
811d4cf4 1426# endif
c8d80cef 1427 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
d17bd1d8
AJ
1428 offsetof(CPUTLBEntry, addend)
1429 - offsetof(CPUTLBEntry, addr_write));
811d4cf4
AZ
1430
1431 switch (opc) {
1432 case 0:
c8d80cef 1433 tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4 1434 break;
811d4cf4 1435 case 1:
67dcab73 1436 if (bswap) {
7aab08aa 1437 tcg_out_bswap16st(s, COND_EQ, TCG_REG_R0, data_reg);
67dcab73
AJ
1438 tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1439 } else {
1440 tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1441 }
811d4cf4
AZ
1442 break;
1443 case 2:
1444 default:
67dcab73
AJ
1445 if (bswap) {
1446 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
1447 tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
1448 } else {
1449 tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
1450 }
811d4cf4
AZ
1451 break;
1452 case 3:
67dcab73
AJ
1453 if (bswap) {
1454 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
1455 tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
1456 tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
9a3abc21 1457 tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
67dcab73
AJ
1458 } else {
1459 tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1460 tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
1461 }
811d4cf4
AZ
1462 break;
1463 }
1464
1465 label_ptr = (void *) s->code_ptr;
c69806ab 1466 tcg_out_b_noaddr(s, COND_EQ);
811d4cf4 1467
811d4cf4 1468 /* TODO: move this code to where the constants pool will be */
9716ef3b
PM
1469 /* Note that this code relies on the constraints we set in arm_op_defs[]
1470 * to ensure that later arguments are not passed to us in registers we
1471 * trash by moving the earlier arguments into them.
1472 */
1473 argreg = TCG_REG_R0;
9716ef3b 1474 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
9716ef3b
PM
1475#if TARGET_LONG_BITS == 64
1476 argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
1477#else
1478 argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
1479#endif
1480
811d4cf4
AZ
1481 switch (opc) {
1482 case 0:
9716ef3b 1483 argreg = tcg_out_arg_reg8(s, argreg, data_reg);
811d4cf4
AZ
1484 break;
1485 case 1:
9716ef3b 1486 argreg = tcg_out_arg_reg16(s, argreg, data_reg);
811d4cf4
AZ
1487 break;
1488 case 2:
9716ef3b 1489 argreg = tcg_out_arg_reg32(s, argreg, data_reg);
811d4cf4
AZ
1490 break;
1491 case 3:
9716ef3b 1492 argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
811d4cf4
AZ
1493 break;
1494 }
811d4cf4 1495
9716ef3b 1496 argreg = tcg_out_arg_imm32(s, argreg, mem_index);
24e838b7 1497 tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);
811d4cf4 1498
c69806ab 1499 reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
379f6698
PB
1500#else /* !CONFIG_SOFTMMU */
1501 if (GUEST_BASE) {
1502 uint32_t offset = GUEST_BASE;
1503 int i;
1504 int rot;
1505
1506 while (offset) {
1507 i = ctz32(offset) & ~1;
1508 rot = ((32 - i) << 7) & 0xf00;
1509
67dcab73 1510 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
379f6698 1511 ((offset >> i) & 0xff) | rot);
67dcab73 1512 addr_reg = TCG_REG_R1;
379f6698
PB
1513 offset &= ~(0xff << i);
1514 }
1515 }
811d4cf4
AZ
1516 switch (opc) {
1517 case 0:
1518 tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1519 break;
811d4cf4 1520 case 1:
67dcab73 1521 if (bswap) {
7aab08aa 1522 tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
67dcab73
AJ
1523 tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1524 } else {
1525 tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
1526 }
811d4cf4
AZ
1527 break;
1528 case 2:
1529 default:
67dcab73
AJ
1530 if (bswap) {
1531 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1532 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1533 } else {
1534 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1535 }
811d4cf4
AZ
1536 break;
1537 case 3:
eae6ce52
AZ
1538 /* TODO: use block store -
1539 * check that data_reg2 > data_reg or the other way */
67dcab73
AJ
1540 if (bswap) {
1541 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1542 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1543 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1544 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
1545 } else {
1546 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1547 tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1548 }
811d4cf4
AZ
1549 break;
1550 }
1551#endif
1552}
1553
811d4cf4
AZ
1554static uint8_t *tb_ret_addr;
1555
a9751609 1556static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
811d4cf4
AZ
1557 const TCGArg *args, const int *const_args)
1558{
2df3f1ee 1559 TCGArg a0, a1, a2, a3, a4, a5;
811d4cf4
AZ
1560 int c;
1561
1562 switch (opc) {
1563 case INDEX_op_exit_tb:
fe33867b
AZ
1564 {
1565 uint8_t *ld_ptr = s->code_ptr;
1566 if (args[0] >> 8)
c8d80cef 1567 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
fe33867b 1568 else
c8d80cef 1569 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
fe33867b
AZ
1570 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1571 if (args[0] >> 8) {
1572 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1573 tcg_out32(s, args[0]);
1574 }
1575 }
811d4cf4
AZ
1576 break;
1577 case INDEX_op_goto_tb:
1578 if (s->tb_jmp_offset) {
1579 /* Direct jump method */
fe33867b 1580#if defined(USE_DIRECT_JUMP)
811d4cf4 1581 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
c69806ab 1582 tcg_out_b_noaddr(s, COND_AL);
811d4cf4 1583#else
c8d80cef 1584 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
1585 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1586 tcg_out32(s, 0);
1587#endif
1588 } else {
1589 /* Indirect jump method */
1590#if 1
1591 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1592 if (c > 0xfff || c < -0xfff) {
1593 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1594 (tcg_target_long) (s->tb_next + args[0]));
c8d80cef 1595 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
811d4cf4 1596 } else
c8d80cef 1597 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
811d4cf4 1598#else
c8d80cef
AJ
1599 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1600 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
811d4cf4
AZ
1601 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1602#endif
1603 }
1604 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1605 break;
1606 case INDEX_op_call:
1607 if (const_args[0])
24e838b7 1608 tcg_out_call(s, args[0]);
811d4cf4
AZ
1609 else
1610 tcg_out_callr(s, COND_AL, args[0]);
1611 break;
811d4cf4
AZ
1612 case INDEX_op_br:
1613 tcg_out_goto_label(s, COND_AL, args[0]);
1614 break;
1615
1616 case INDEX_op_ld8u_i32:
1617 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1618 break;
1619 case INDEX_op_ld8s_i32:
1620 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1621 break;
1622 case INDEX_op_ld16u_i32:
1623 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1624 break;
1625 case INDEX_op_ld16s_i32:
1626 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1627 break;
1628 case INDEX_op_ld_i32:
1629 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1630 break;
1631 case INDEX_op_st8_i32:
f694a27e 1632 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
811d4cf4
AZ
1633 break;
1634 case INDEX_op_st16_i32:
f694a27e 1635 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
811d4cf4
AZ
1636 break;
1637 case INDEX_op_st_i32:
1638 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1639 break;
1640
1641 case INDEX_op_mov_i32:
1642 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1643 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1644 break;
1645 case INDEX_op_movi_i32:
1646 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1647 break;
4a1d241e
PM
1648 case INDEX_op_movcond_i32:
1649 /* Constraints mean that v2 is always in the same register as dest,
1650 * so we only need to do "if condition passed, move v1 to dest".
1651 */
5d53b4c9
RH
1652 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1653 args[1], args[2], const_args[2]);
1654 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
1655 ARITH_MVN, args[0], 0, args[3], const_args[3]);
4a1d241e 1656 break;
811d4cf4 1657 case INDEX_op_add_i32:
a9a86ae9
RH
1658 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
1659 args[0], args[1], args[2], const_args[2]);
1660 break;
811d4cf4 1661 case INDEX_op_sub_i32:
d9fda575
RH
1662 if (const_args[1]) {
1663 if (const_args[2]) {
1664 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
1665 } else {
1666 tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
1667 args[0], args[2], args[1], 1);
1668 }
1669 } else {
1670 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
1671 args[0], args[1], args[2], const_args[2]);
1672 }
a9a86ae9 1673 break;
811d4cf4 1674 case INDEX_op_and_i32:
19b62bf4
RH
1675 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1676 args[0], args[1], args[2], const_args[2]);
1677 break;
932234f6 1678 case INDEX_op_andc_i32:
19b62bf4
RH
1679 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1680 args[0], args[1], args[2], const_args[2]);
1681 break;
811d4cf4
AZ
1682 case INDEX_op_or_i32:
1683 c = ARITH_ORR;
1684 goto gen_arith;
1685 case INDEX_op_xor_i32:
1686 c = ARITH_EOR;
1687 /* Fall through. */
1688 gen_arith:
7fc645bf 1689 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
811d4cf4
AZ
1690 break;
1691 case INDEX_op_add2_i32:
2df3f1ee
RH
1692 a0 = args[0], a1 = args[1], a2 = args[2];
1693 a3 = args[3], a4 = args[4], a5 = args[5];
1694 if (a0 == a3 || (a0 == a5 && !const_args[5])) {
4346457a 1695 a0 = TCG_REG_TMP;
2df3f1ee
RH
1696 }
1697 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
1698 a0, a2, a4, const_args[4]);
1699 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
1700 a1, a3, a5, const_args[5]);
1701 tcg_out_mov_reg(s, COND_AL, args[0], a0);
811d4cf4
AZ
1702 break;
1703 case INDEX_op_sub2_i32:
2df3f1ee
RH
1704 a0 = args[0], a1 = args[1], a2 = args[2];
1705 a3 = args[3], a4 = args[4], a5 = args[5];
1706 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
4346457a 1707 a0 = TCG_REG_TMP;
2df3f1ee
RH
1708 }
1709 if (const_args[2]) {
1710 if (const_args[4]) {
1711 tcg_out_movi32(s, COND_AL, a0, a4);
1712 a4 = a0;
1713 }
1714 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
1715 } else {
1716 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
1717 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
1718 }
1719 if (const_args[3]) {
1720 if (const_args[5]) {
1721 tcg_out_movi32(s, COND_AL, a1, a5);
1722 a5 = a1;
1723 }
1724 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
1725 } else {
1726 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
1727 a1, a3, a5, const_args[5]);
1728 }
1729 tcg_out_mov_reg(s, COND_AL, args[0], a0);
811d4cf4 1730 break;
650bbb36
AZ
1731 case INDEX_op_neg_i32:
1732 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1733 break;
f878d2d2
LD
1734 case INDEX_op_not_i32:
1735 tcg_out_dat_reg(s, COND_AL,
1736 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1737 break;
811d4cf4
AZ
1738 case INDEX_op_mul_i32:
1739 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1740 break;
1741 case INDEX_op_mulu2_i32:
1742 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1743 break;
d693e147
RH
1744 case INDEX_op_muls2_i32:
1745 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1746 break;
811d4cf4
AZ
1747 /* XXX: Perhaps args[2] & 0x1f is wrong */
1748 case INDEX_op_shl_i32:
1749 c = const_args[2] ?
1750 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1751 goto gen_shift32;
1752 case INDEX_op_shr_i32:
1753 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1754 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1755 goto gen_shift32;
1756 case INDEX_op_sar_i32:
1757 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1758 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
293579e5
AJ
1759 goto gen_shift32;
1760 case INDEX_op_rotr_i32:
1761 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1762 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
811d4cf4
AZ
1763 /* Fall through. */
1764 gen_shift32:
1765 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1766 break;
1767
293579e5
AJ
1768 case INDEX_op_rotl_i32:
1769 if (const_args[2]) {
1770 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1771 ((0x20 - args[2]) & 0x1f) ?
1772 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1773 SHIFT_IMM_LSL(0));
1774 } else {
4346457a 1775 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
293579e5 1776 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
4346457a 1777 SHIFT_REG_ROR(TCG_REG_TMP));
293579e5
AJ
1778 }
1779 break;
1780
811d4cf4 1781 case INDEX_op_brcond_i32:
5d53b4c9 1782 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
7fc645bf 1783 args[0], args[1], const_args[1]);
811d4cf4
AZ
1784 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1785 break;
1786 case INDEX_op_brcond2_i32:
1787 /* The resulting conditions are:
1788 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1789 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1790 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1791 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1792 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1793 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1794 */
5d53b4c9
RH
1795 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1796 args[1], args[3], const_args[3]);
1797 tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
1798 args[0], args[2], const_args[2]);
811d4cf4
AZ
1799 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1800 break;
f72a6cd7 1801 case INDEX_op_setcond_i32:
5d53b4c9
RH
1802 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1803 args[1], args[2], const_args[2]);
f72a6cd7
AJ
1804 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1805 ARITH_MOV, args[0], 0, 1);
1806 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1807 ARITH_MOV, args[0], 0, 0);
1808 break;
e0404769
AJ
1809 case INDEX_op_setcond2_i32:
1810 /* See brcond2_i32 comment */
5d53b4c9
RH
1811 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1812 args[2], args[4], const_args[4]);
1813 tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
1814 args[1], args[3], const_args[3]);
e0404769
AJ
1815 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
1816 ARITH_MOV, args[0], 0, 1);
1817 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
1818 ARITH_MOV, args[0], 0, 0);
b525f0a9 1819 break;
811d4cf4
AZ
1820
1821 case INDEX_op_qemu_ld8u:
7e0d9562 1822 tcg_out_qemu_ld(s, args, 0);
811d4cf4
AZ
1823 break;
1824 case INDEX_op_qemu_ld8s:
7e0d9562 1825 tcg_out_qemu_ld(s, args, 0 | 4);
811d4cf4
AZ
1826 break;
1827 case INDEX_op_qemu_ld16u:
7e0d9562 1828 tcg_out_qemu_ld(s, args, 1);
811d4cf4
AZ
1829 break;
1830 case INDEX_op_qemu_ld16s:
7e0d9562 1831 tcg_out_qemu_ld(s, args, 1 | 4);
811d4cf4 1832 break;
86feb1c8 1833 case INDEX_op_qemu_ld32:
7e0d9562 1834 tcg_out_qemu_ld(s, args, 2);
811d4cf4
AZ
1835 break;
1836 case INDEX_op_qemu_ld64:
7e0d9562 1837 tcg_out_qemu_ld(s, args, 3);
811d4cf4 1838 break;
650bbb36 1839
811d4cf4 1840 case INDEX_op_qemu_st8:
7e0d9562 1841 tcg_out_qemu_st(s, args, 0);
811d4cf4
AZ
1842 break;
1843 case INDEX_op_qemu_st16:
7e0d9562 1844 tcg_out_qemu_st(s, args, 1);
811d4cf4
AZ
1845 break;
1846 case INDEX_op_qemu_st32:
7e0d9562 1847 tcg_out_qemu_st(s, args, 2);
811d4cf4
AZ
1848 break;
1849 case INDEX_op_qemu_st64:
7e0d9562 1850 tcg_out_qemu_st(s, args, 3);
811d4cf4
AZ
1851 break;
1852
244b1e81
AJ
1853 case INDEX_op_bswap16_i32:
1854 tcg_out_bswap16(s, COND_AL, args[0], args[1]);
1855 break;
1856 case INDEX_op_bswap32_i32:
1857 tcg_out_bswap32(s, COND_AL, args[0], args[1]);
1858 break;
1859
811d4cf4 1860 case INDEX_op_ext8s_i32:
9517094f 1861 tcg_out_ext8s(s, COND_AL, args[0], args[1]);
811d4cf4
AZ
1862 break;
1863 case INDEX_op_ext16s_i32:
9517094f
AJ
1864 tcg_out_ext16s(s, COND_AL, args[0], args[1]);
1865 break;
1866 case INDEX_op_ext16u_i32:
1867 tcg_out_ext16u(s, COND_AL, args[0], args[1]);
811d4cf4
AZ
1868 break;
1869
b6b24cb0
RH
1870 case INDEX_op_deposit_i32:
1871 tcg_out_deposit(s, COND_AL, args[0], args[2],
1872 args[3], args[4], const_args[2]);
1873 break;
1874
0637c56c
RH
1875 case INDEX_op_div_i32:
1876 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
1877 break;
1878 case INDEX_op_divu_i32:
1879 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
1880 break;
1881 case INDEX_op_rem_i32:
4346457a
RH
1882 tcg_out_sdiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
1883 tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
1884 tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
0637c56c
RH
1885 SHIFT_IMM_LSL(0));
1886 break;
1887 case INDEX_op_remu_i32:
4346457a
RH
1888 tcg_out_udiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
1889 tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
1890 tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
0637c56c
RH
1891 SHIFT_IMM_LSL(0));
1892 break;
1893
811d4cf4
AZ
1894 default:
1895 tcg_abort();
1896 }
1897}
1898
/* Operand-constraint table for the ARM backend: one entry per supported
   TCG opcode, giving the constraint string for each operand.  "r" is any
   general register; the remaining letters ("I", "N", "K", "l", "L", "s",
   "S", "Z", "0", "i") are backend-specific classes parsed elsewhere in
   this file — presumably immediate encodings and qemu_ld/st clobber
   classes (NOTE(review): confirm against target_parse_constraint).  */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rIN" } },
    { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rIK" } },
    { INDEX_op_andc_i32, { "r", "r", "rIK" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rIN" } },
    { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
    { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
    { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },

    /* With 32-bit guest addresses the qemu_ld/st address is a single
       operand; with 64-bit guests it is a register pair, hence the
       extra constraint in the #else branch below.  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_rem_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_remu_i32, { "r", "r", "r" } },
#endif

    /* Sentinel terminating the table.  */
    { -1 },
};
1989
e4d58b41 1990static void tcg_target_init(TCGContext *s)
811d4cf4 1991{
20cb400d 1992#if !defined(CONFIG_USER_ONLY)
811d4cf4
AZ
1993 /* fail safe */
1994 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1995 tcg_abort();
20cb400d 1996#endif
811d4cf4 1997
e4a7d5e8 1998 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
811d4cf4 1999 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
e4a7d5e8
AJ
2000 (1 << TCG_REG_R0) |
2001 (1 << TCG_REG_R1) |
2002 (1 << TCG_REG_R2) |
2003 (1 << TCG_REG_R3) |
2004 (1 << TCG_REG_R12) |
2005 (1 << TCG_REG_R14));
811d4cf4
AZ
2006
2007 tcg_regset_clear(s->reserved_regs);
811d4cf4 2008 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
4346457a 2009 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
e4a7d5e8 2010 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
811d4cf4
AZ
2011
2012 tcg_add_target_add_op_defs(arm_op_defs);
2013}
2014
/* Generic TCG load hook: load a 32-bit value from arg1 + arg2 into arg.
   Only TCG_TYPE_I32 exists on this 32-bit target, so 'type' is ignored.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
2020
/* Generic TCG store hook: store the 32-bit value in arg to arg1 + arg2.
   Only TCG_TYPE_I32 exists on this 32-bit target, so 'type' is ignored.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
2026
/* Generic TCG register-move hook: emit "mov ret, arg" (MOV with LSL #0,
   i.e. a plain unconditional register copy).  */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
2032
/* Generic TCG move-immediate hook: materialise the constant 'arg' in
   register 'ret', delegating instruction selection to tcg_out_movi32.  */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
2038
e4d58b41 2039static void tcg_target_qemu_prologue(TCGContext *s)
811d4cf4 2040{
fc4d60ee
RH
2041 int frame_size;
2042
2043 /* Calling convention requires us to save r4-r11 and lr. */
2044 /* stmdb sp!, { r4 - r11, lr } */
2045 tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
cea5f9a2 2046
fc4d60ee
RH
2047 /* Allocate the local stack frame. */
2048 frame_size = TCG_STATIC_CALL_ARGS_SIZE;
2049 frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
2050 /* We saved an odd number of registers above; keep an 8 aligned stack. */
2051 frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
2052 & -TCG_TARGET_STACK_ALIGN) + 4;
2053
2054 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2055 TCG_REG_CALL_STACK, frame_size, 1);
2056 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2057 CPU_TEMP_BUF_NLONGS * sizeof(long));
4e17eae9 2058
cea5f9a2 2059 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
811d4cf4 2060
cea5f9a2 2061 tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
811d4cf4
AZ
2062 tb_ret_addr = s->code_ptr;
2063
fc4d60ee
RH
2064 /* Epilogue. We branch here via tb_ret_addr. */
2065 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
2066 TCG_REG_CALL_STACK, frame_size, 1);
2067
2068 /* ldmia sp!, { r4 - r11, pc } */
2069 tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
811d4cf4 2070}