]> git.proxmox.com Git - qemu.git/blame - tcg/arm/tcg-target.c
tcg-arm: Rename use_armv5_instructions to use_armvt5_instructions
[qemu.git] / tcg / arm / tcg-target.c
CommitLineData
811d4cf4
AZ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
/* Detect the host ARM architecture revision from the compiler's predefined
   macros and expose the results as compile-time constant flags.  Each
   helper macro is #undef'd immediately after use so only the C constants
   escape this block. */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5T_INSTRUCTIONS
#endif

#ifdef USE_ARMV5T_INSTRUCTIONS
static const int use_armv5t_instructions = 1;
#else
static const int use_armv5t_instructions = 0;
#endif
/* Fix: undef the macro actually defined above.  The previous
   "#undef USE_ARMV5_INSTRUCTIONS" referenced the pre-rename name and
   left USE_ARMV5T_INSTRUCTIONS defined for the rest of the file. */
#undef USE_ARMV5T_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS

/* Hardware integer divide is probed at runtime unless the build
   pre-defines use_idiv_instructions as a macro. */
#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifdef CONFIG_GETAUXVAL
# include <sys/auxv.h>
#endif
76
d4a9eb1f
BS
77#ifndef NDEBUG
78static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
811d4cf4
AZ
79 "%r0",
80 "%r1",
81 "%r2",
82 "%r3",
83 "%r4",
84 "%r5",
85 "%r6",
86 "%r7",
87 "%r8",
88 "%r9",
89 "%r10",
90 "%r11",
91 "%r12",
92 "%r13",
93 "%r14",
e4a7d5e8 94 "%pc",
811d4cf4 95};
d4a9eb1f 96#endif
811d4cf4 97
d4a9eb1f 98static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
99 TCG_REG_R4,
100 TCG_REG_R5,
101 TCG_REG_R6,
102 TCG_REG_R7,
103 TCG_REG_R8,
104 TCG_REG_R9,
105 TCG_REG_R10,
106 TCG_REG_R11,
811d4cf4 107 TCG_REG_R13,
914ccf51
AJ
108 TCG_REG_R0,
109 TCG_REG_R1,
110 TCG_REG_R2,
111 TCG_REG_R3,
112 TCG_REG_R12,
811d4cf4
AZ
113 TCG_REG_R14,
114};
115
d4a9eb1f 116static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
117 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
118};
d4a9eb1f 119static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
120 TCG_REG_R0, TCG_REG_R1
121};
122
13dd6fb9 123#define TCG_REG_TMP TCG_REG_R12
4346457a 124
c69806ab
AJ
125static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
126{
127 *(uint32_t *) code_ptr = target;
128}
129
130static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
131{
132 uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);
133
134 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
135 | (offset & 0xffffff);
136}
137
650bbb36 138static void patch_reloc(uint8_t *code_ptr, int type,
811d4cf4
AZ
139 tcg_target_long value, tcg_target_long addend)
140{
141 switch (type) {
142 case R_ARM_ABS32:
c69806ab 143 reloc_abs32(code_ptr, value);
811d4cf4
AZ
144 break;
145
146 case R_ARM_CALL:
147 case R_ARM_JUMP24:
148 default:
149 tcg_abort();
150
151 case R_ARM_PC24:
c69806ab 152 reloc_pc24(code_ptr, value);
811d4cf4
AZ
153 break;
154 }
155}
156
/* Target-specific constant constraints, beyond the generic TCG_CT_CONST. */
#define TCG_CT_CONST_ARM  0x100  /* rotated 8-bit ALU immediate */
#define TCG_CT_CONST_INV  0x200  /* ...or its bitwise inverse fits */
#define TCG_CT_CONST_NEG  0x400  /* ...or its negation fits */
#define TCG_CT_CONST_ZERO 0x800  /* exactly zero */
19b62bf4 161
811d4cf4 162/* parse target specific constraints */
d4a9eb1f 163static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
811d4cf4
AZ
164{
165 const char *ct_str;
166
167 ct_str = *pct_str;
168 switch (ct_str[0]) {
cb4e581f 169 case 'I':
19b62bf4
RH
170 ct->ct |= TCG_CT_CONST_ARM;
171 break;
172 case 'K':
173 ct->ct |= TCG_CT_CONST_INV;
174 break;
a9a86ae9
RH
175 case 'N': /* The gcc constraint letter is L, already used here. */
176 ct->ct |= TCG_CT_CONST_NEG;
177 break;
b6b24cb0
RH
178 case 'Z':
179 ct->ct |= TCG_CT_CONST_ZERO;
180 break;
cb4e581f 181
811d4cf4 182 case 'r':
811d4cf4
AZ
183 ct->ct |= TCG_CT_REG;
184 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
185 break;
186
67dcab73
AJ
187 /* qemu_ld address */
188 case 'l':
811d4cf4
AZ
189 ct->ct |= TCG_CT_REG;
190 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73 191#ifdef CONFIG_SOFTMMU
702b33b1 192 /* r0-r2 will be overwritten when reading the tlb entry,
67dcab73 193 so don't use these. */
811d4cf4
AZ
194 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
195 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
9716ef3b 196 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
67dcab73 197#endif
811d4cf4 198 break;
67dcab73 199 case 'L':
d0660ed4
AZ
200 ct->ct |= TCG_CT_REG;
201 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73
AJ
202#ifdef CONFIG_SOFTMMU
203 /* r1 is still needed to load data_reg or data_reg2,
204 so don't use it. */
d0660ed4 205 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
67dcab73 206#endif
d0660ed4
AZ
207 break;
208
67dcab73
AJ
209 /* qemu_st address & data_reg */
210 case 's':
811d4cf4
AZ
211 ct->ct |= TCG_CT_REG;
212 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
702b33b1
RH
213 /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
214 and r0-r1 doing the byte swapping, so don't use these. */
811d4cf4 215 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
811d4cf4 216 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
702b33b1
RH
217#if defined(CONFIG_SOFTMMU)
218 /* Avoid clashes with registers being used for helper args */
67dcab73 219 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
89c33337 220#if TARGET_LONG_BITS == 64
9716ef3b
PM
221 /* Avoid clashes with registers being used for helper args */
222 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
223#endif
811d4cf4 224#endif
67dcab73 225 break;
811d4cf4 226
811d4cf4
AZ
227 default:
228 return -1;
229 }
230 ct_str++;
231 *pct_str = ct_str;
232
233 return 0;
234}
235
/* Rotate VAL left by N bits, 0 <= N <= 31.
   Fix: the previous "val >> (32 - n)" was undefined behavior for n == 0
   (shift count equal to the type width), and n == 0 is reachable --
   encode_imm() returns 0 for small immediates and tcg_out_movi32() then
   calls rotl(arg, 0).  Masking the right-shift count keeps the result
   identical for 1..31 and yields VAL unchanged for n == 0. */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (-n & 31));
}
240
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   value right-rotated by an even amount between 0 and 30.  Return the
   left-rotation count under which IMM fits in 8 bits, or -1 if IMM
   cannot be encoded. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* Simple case: already within the low 8 bits. */
    if ((imm & ~0xff) == 0) {
        return 0;
    }

    /* Try an even right-shift of the set bits. */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0) {
        return 32 - shift;
    }

    /* Try harder: the 8-bit field may wrap around the top of the word. */
    if ((rotl(imm, 2) & ~0xff) == 0) {
        return 2;
    }
    if ((rotl(imm, 4) & ~0xff) == 0) {
        return 4;
    }
    if ((rotl(imm, 6) & ~0xff) == 0) {
        return 6;
    }

    /* imm can't be encoded */
    return -1;
}
/* True if IMM is representable as a rotated 8-bit ALU immediate. */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
269
811d4cf4
AZ
270/* Test if a constant matches the constraint.
271 * TODO: define constraints for:
272 *
273 * ldr/str offset: between -0xfff and 0xfff
274 * ldrh/strh offset: between -0xff and 0xff
275 * mov operand2: values represented with x << (2 * y), x < 0x100
276 * add, sub, eor...: ditto
277 */
278static inline int tcg_target_const_match(tcg_target_long val,
19b62bf4 279 const TCGArgConstraint *arg_ct)
811d4cf4
AZ
280{
281 int ct;
282 ct = arg_ct->ct;
19b62bf4 283 if (ct & TCG_CT_CONST) {
811d4cf4 284 return 1;
19b62bf4 285 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
cb4e581f 286 return 1;
19b62bf4
RH
287 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
288 return 1;
a9a86ae9
RH
289 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
290 return 1;
b6b24cb0
RH
291 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
292 return 1;
19b62bf4 293 } else {
811d4cf4 294 return 0;
19b62bf4 295 }
811d4cf4
AZ
296}
297
#define TO_CPSR (1 << 20)  /* S bit: instruction updates the flags */

typedef enum {
    /* Data-processing opcodes occupy bits 24:21; the pure comparison
       forms always set flags, hence TO_CPSR in their encodings. */
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    /* Base opcodes for the load/store forms used below. */
    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
} ARMInsn;
811d4cf4 338
811d4cf4
AZ
339#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
340#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
341#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
342#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
343#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
344#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
345#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
346#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
347
348enum arm_cond_code_e {
349 COND_EQ = 0x0,
350 COND_NE = 0x1,
351 COND_CS = 0x2, /* Unsigned greater or equal */
352 COND_CC = 0x3, /* Unsigned less than */
353 COND_MI = 0x4, /* Negative */
354 COND_PL = 0x5, /* Zero or greater */
355 COND_VS = 0x6, /* Overflow */
356 COND_VC = 0x7, /* No overflow */
357 COND_HI = 0x8, /* Unsigned greater than */
358 COND_LS = 0x9, /* Unsigned less or equal */
359 COND_GE = 0xa,
360 COND_LT = 0xb,
361 COND_GT = 0xc,
362 COND_LE = 0xd,
363 COND_AL = 0xe,
364};
365
0aed257f 366static const uint8_t tcg_cond_to_arm_cond[] = {
811d4cf4
AZ
367 [TCG_COND_EQ] = COND_EQ,
368 [TCG_COND_NE] = COND_NE,
369 [TCG_COND_LT] = COND_LT,
370 [TCG_COND_GE] = COND_GE,
371 [TCG_COND_LE] = COND_LE,
372 [TCG_COND_GT] = COND_GT,
373 /* unsigned */
374 [TCG_COND_LTU] = COND_CC,
375 [TCG_COND_GEU] = COND_CS,
376 [TCG_COND_LEU] = COND_LS,
377 [TCG_COND_GTU] = COND_HI,
378};
379
380static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
381{
382 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
383}
384
385static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
386{
387 tcg_out32(s, (cond << 28) | 0x0a000000 |
388 (((offset - 8) >> 2) & 0x00ffffff));
389}
390
e936243a
AZ
391static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
392{
56779034
AJ
393 /* We pay attention here to not modify the branch target by skipping
394 the corresponding bytes. This ensure that caches and memory are
395 kept coherent during retranslation. */
e2542fe2 396#ifdef HOST_WORDS_BIGENDIAN
e936243a
AZ
397 tcg_out8(s, (cond << 4) | 0x0a);
398 s->code_ptr += 3;
399#else
400 s->code_ptr += 3;
401 tcg_out8(s, (cond << 4) | 0x0a);
402#endif
403}
404
811d4cf4
AZ
405static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
406{
407 tcg_out32(s, (cond << 28) | 0x0b000000 |
408 (((offset - 8) >> 2) & 0x00ffffff));
409}
410
23401b58
AJ
411static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
412{
413 tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
414}
415
24e838b7
PM
416static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
417{
418 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
419 (((offset - 8) >> 2) & 0x00ffffff));
420}
421
811d4cf4
AZ
422static inline void tcg_out_dat_reg(TCGContext *s,
423 int cond, int opc, int rd, int rn, int rm, int shift)
424{
2df3f1ee 425 tcg_out32(s, (cond << 28) | (0 << 25) | opc |
811d4cf4
AZ
426 (rn << 16) | (rd << 12) | shift | rm);
427}
428
df5e0ef7
RH
429static inline void tcg_out_nop(TCGContext *s)
430{
431 if (use_armv7_instructions) {
432 /* Architected nop introduced in v6k. */
433 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
434 also Just So Happened to do nothing on pre-v6k so that we
435 don't need to conditionalize it? */
436 tcg_out32(s, 0xe320f000);
437 } else {
438 /* Prior to that the assembler uses mov r0, r0. */
439 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
440 }
441}
442
9716ef3b
PM
443static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
444{
445 /* Simple reg-reg move, optimising out the 'do nothing' case */
446 if (rd != rm) {
447 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
448 }
449}
450
811d4cf4
AZ
451static inline void tcg_out_dat_imm(TCGContext *s,
452 int cond, int opc, int rd, int rn, int im)
453{
2df3f1ee 454 tcg_out32(s, (cond << 28) | (1 << 25) | opc |
811d4cf4
AZ
455 (rn << 16) | (rd << 12) | im);
456}
457
e86e0f28 458static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
811d4cf4 459{
e86e0f28
RH
460 int rot, opc, rn;
461
462 /* For armv7, make sure not to use movw+movt when mov/mvn would do.
463 Speed things up by only checking when movt would be required.
464 Prior to armv7, have one go at fully rotated immediates before
465 doing the decomposition thing below. */
466 if (!use_armv7_instructions || (arg & 0xffff0000)) {
467 rot = encode_imm(arg);
468 if (rot >= 0) {
469 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
470 rotl(arg, rot) | (rot << 7));
471 return;
472 }
473 rot = encode_imm(~arg);
474 if (rot >= 0) {
475 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
476 rotl(~arg, rot) | (rot << 7));
477 return;
478 }
479 }
480
481 /* Use movw + movt. */
482 if (use_armv7_instructions) {
ac34fb5c
AJ
483 /* movw */
484 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
485 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
0f11f25a 486 if (arg & 0xffff0000) {
ac34fb5c
AJ
487 /* movt */
488 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
489 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
ac34fb5c 490 }
e86e0f28
RH
491 return;
492 }
0f11f25a 493
e86e0f28
RH
494 /* TODO: This is very suboptimal, we can easily have a constant
495 pool somewhere after all the instructions. */
496 opc = ARITH_MOV;
497 rn = 0;
498 /* If we have lots of leading 1's, we can shorten the sequence by
499 beginning with mvn and then clearing higher bits with eor. */
500 if (clz32(~arg) > clz32(arg)) {
501 opc = ARITH_MVN, arg = ~arg;
0f11f25a 502 }
e86e0f28
RH
503 do {
504 int i = ctz32(arg) & ~1;
505 rot = ((32 - i) << 7) & 0xf00;
506 tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
507 arg &= ~(0xff << i);
508
509 opc = ARITH_EOR;
510 rn = rd;
511 } while (arg);
811d4cf4
AZ
512}
513
7fc645bf
PM
514static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
515 TCGArg lhs, TCGArg rhs, int rhs_is_const)
516{
517 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
518 * rhs must satisfy the "rI" constraint.
519 */
520 if (rhs_is_const) {
521 int rot = encode_imm(rhs);
522 assert(rot >= 0);
523 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
524 } else {
525 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
526 }
527}
528
19b62bf4
RH
529static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
530 TCGReg dst, TCGReg lhs, TCGArg rhs,
531 bool rhs_is_const)
532{
533 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
534 * rhs must satisfy the "rIK" constraint.
535 */
536 if (rhs_is_const) {
537 int rot = encode_imm(rhs);
538 if (rot < 0) {
539 rhs = ~rhs;
540 rot = encode_imm(rhs);
541 assert(rot >= 0);
542 opc = opinv;
543 }
544 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
545 } else {
546 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
547 }
548}
549
a9a86ae9
RH
550static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
551 TCGArg dst, TCGArg lhs, TCGArg rhs,
552 bool rhs_is_const)
553{
554 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
555 * rhs must satisfy the "rIN" constraint.
556 */
557 if (rhs_is_const) {
558 int rot = encode_imm(rhs);
559 if (rot < 0) {
560 rhs = -rhs;
561 rot = encode_imm(rhs);
562 assert(rot >= 0);
563 opc = opneg;
564 }
565 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
566 } else {
567 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
568 }
569}
570
34358a12
RH
571static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
572 TCGReg rn, TCGReg rm)
811d4cf4 573{
34358a12
RH
574 /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
575 if (!use_armv6_instructions && rd == rn) {
576 if (rd == rm) {
577 /* rd == rn == rm; copy an input to tmp first. */
578 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
579 rm = rn = TCG_REG_TMP;
580 } else {
581 rn = rm;
582 rm = rd;
583 }
811d4cf4 584 }
34358a12
RH
585 /* mul */
586 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
811d4cf4
AZ
587}
588
34358a12
RH
589static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
590 TCGReg rd1, TCGReg rn, TCGReg rm)
811d4cf4 591{
34358a12
RH
592 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
593 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
594 if (rd0 == rm || rd1 == rm) {
595 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
596 rn = TCG_REG_TMP;
597 } else {
598 TCGReg t = rn;
599 rn = rm;
600 rm = t;
601 }
811d4cf4 602 }
34358a12
RH
603 /* umull */
604 tcg_out32(s, (cond << 28) | 0x00800090 |
605 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
811d4cf4
AZ
606}
607
34358a12
RH
608static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
609 TCGReg rd1, TCGReg rn, TCGReg rm)
811d4cf4 610{
34358a12
RH
611 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
612 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
613 if (rd0 == rm || rd1 == rm) {
614 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
615 rn = TCG_REG_TMP;
616 } else {
617 TCGReg t = rn;
618 rn = rm;
619 rm = t;
620 }
811d4cf4 621 }
34358a12
RH
622 /* smull */
623 tcg_out32(s, (cond << 28) | 0x00c00090 |
624 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
811d4cf4
AZ
625}
626
0637c56c
RH
627static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
628{
629 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
630}
631
632static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
633{
634 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
635}
636
9517094f
AJ
637static inline void tcg_out_ext8s(TCGContext *s, int cond,
638 int rd, int rn)
639{
640 if (use_armv6_instructions) {
641 /* sxtb */
642 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
643 } else {
e23886a9 644 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 645 rd, 0, rn, SHIFT_IMM_LSL(24));
e23886a9 646 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
647 rd, 0, rd, SHIFT_IMM_ASR(24));
648 }
649}
650
e854b6d3
AJ
651static inline void tcg_out_ext8u(TCGContext *s, int cond,
652 int rd, int rn)
653{
654 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
655}
656
9517094f
AJ
657static inline void tcg_out_ext16s(TCGContext *s, int cond,
658 int rd, int rn)
659{
660 if (use_armv6_instructions) {
661 /* sxth */
662 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
663 } else {
e23886a9 664 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 665 rd, 0, rn, SHIFT_IMM_LSL(16));
e23886a9 666 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
667 rd, 0, rd, SHIFT_IMM_ASR(16));
668 }
669}
670
671static inline void tcg_out_ext16u(TCGContext *s, int cond,
672 int rd, int rn)
673{
674 if (use_armv6_instructions) {
675 /* uxth */
676 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
677 } else {
e23886a9 678 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 679 rd, 0, rn, SHIFT_IMM_LSL(16));
e23886a9 680 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
681 rd, 0, rd, SHIFT_IMM_LSR(16));
682 }
683}
684
67dcab73
AJ
685static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
686{
687 if (use_armv6_instructions) {
688 /* revsh */
689 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
690 } else {
691 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 692 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
67dcab73 693 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 694 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
67dcab73 695 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 696 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
67dcab73
AJ
697 }
698}
699
244b1e81
AJ
700static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
701{
702 if (use_armv6_instructions) {
703 /* rev16 */
704 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
705 } else {
706 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 707 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
244b1e81 708 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 709 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
244b1e81 710 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 711 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
244b1e81
AJ
712 }
713}
714
7aab08aa
AJ
715/* swap the two low bytes assuming that the two high input bytes and the
716 two high output bit can hold any value. */
717static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
718{
719 if (use_armv6_instructions) {
720 /* rev16 */
721 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
722 } else {
723 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a
RH
724 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
725 tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
7aab08aa 726 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 727 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
7aab08aa
AJ
728 }
729}
730
244b1e81
AJ
731static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
732{
733 if (use_armv6_instructions) {
734 /* rev */
735 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
736 } else {
737 tcg_out_dat_reg(s, cond, ARITH_EOR,
4346457a 738 TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
244b1e81 739 tcg_out_dat_imm(s, cond, ARITH_BIC,
4346457a 740 TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
244b1e81
AJ
741 tcg_out_dat_reg(s, cond, ARITH_MOV,
742 rd, 0, rn, SHIFT_IMM_ROR(8));
743 tcg_out_dat_reg(s, cond, ARITH_EOR,
4346457a 744 rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
244b1e81
AJ
745 }
746}
747
b6b24cb0
RH
748bool tcg_target_deposit_valid(int ofs, int len)
749{
750 /* ??? Without bfi, we could improve over generic code by combining
751 the right-shift from a non-zero ofs with the orr. We do run into
752 problems when rd == rs, and the mask generated from ofs+len doesn't
753 fit into an immediate. We would have to be careful not to pessimize
754 wrt the optimizations performed on the expanded code. */
755 return use_armv7_instructions;
756}
757
758static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
759 TCGArg a1, int ofs, int len, bool const_a1)
760{
761 if (const_a1) {
762 /* bfi becomes bfc with rn == 15. */
763 a1 = 15;
764 }
765 /* bfi/bfc */
766 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
767 | (ofs << 7) | ((ofs + len - 1) << 16));
768}
769
9feac1d7
RH
770/* Note that this routine is used for both LDR and LDRH formats, so we do
771 not wish to include an immediate shift at this point. */
772static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
773 TCGReg rn, TCGReg rm, bool u, bool p, bool w)
774{
775 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
776 | (w << 21) | (rn << 16) | (rt << 12) | rm);
777}
778
779static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
780 TCGReg rn, int imm8, bool p, bool w)
781{
782 bool u = 1;
783 if (imm8 < 0) {
784 imm8 = -imm8;
785 u = 0;
786 }
787 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
788 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
789}
790
791static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
792 TCGReg rn, int imm12, bool p, bool w)
811d4cf4 793{
9feac1d7
RH
794 bool u = 1;
795 if (imm12 < 0) {
796 imm12 = -imm12;
797 u = 0;
798 }
799 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
800 (rn << 16) | (rt << 12) | imm12);
801}
802
803static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
804 TCGReg rn, int imm12)
805{
806 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
811d4cf4
AZ
807}
808
9feac1d7
RH
809static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
810 TCGReg rn, int imm12)
811d4cf4 811{
9feac1d7 812 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
811d4cf4
AZ
813}
814
9feac1d7
RH
815static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
816 TCGReg rn, TCGReg rm)
811d4cf4 817{
9feac1d7 818 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
819}
820
9feac1d7
RH
821static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
822 TCGReg rn, TCGReg rm)
811d4cf4 823{
9feac1d7 824 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
825}
826
3979144c 827/* Register pre-increment with base writeback. */
9feac1d7
RH
828static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
829 TCGReg rn, TCGReg rm)
3979144c 830{
9feac1d7 831 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
3979144c
PB
832}
833
9feac1d7
RH
834static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
835 TCGReg rn, TCGReg rm)
3979144c 836{
9feac1d7 837 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
3979144c
PB
838}
839
9feac1d7
RH
840static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
841 TCGReg rn, int imm8)
811d4cf4 842{
9feac1d7 843 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
811d4cf4
AZ
844}
845
9feac1d7
RH
846static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
847 TCGReg rn, int imm8)
811d4cf4 848{
9feac1d7 849 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
811d4cf4
AZ
850}
851
9feac1d7
RH
852static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
853 TCGReg rn, TCGReg rm)
811d4cf4 854{
9feac1d7 855 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
856}
857
9feac1d7
RH
858static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
859 TCGReg rn, TCGReg rm)
811d4cf4 860{
9feac1d7 861 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
862}
863
9feac1d7
RH
864static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
865 TCGReg rn, int imm8)
811d4cf4 866{
9feac1d7 867 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
811d4cf4
AZ
868}
869
9feac1d7
RH
870static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
871 TCGReg rn, TCGReg rm)
811d4cf4 872{
9feac1d7 873 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
874}
875
9feac1d7
RH
876static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
877 TCGReg rn, int imm12)
811d4cf4 878{
9feac1d7 879 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
811d4cf4
AZ
880}
881
9feac1d7
RH
882static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
883 TCGReg rn, int imm12)
811d4cf4 884{
9feac1d7 885 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
811d4cf4
AZ
886}
887
9feac1d7
RH
888static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
889 TCGReg rn, TCGReg rm)
811d4cf4 890{
9feac1d7 891 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
892}
893
9feac1d7
RH
894static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
895 TCGReg rn, TCGReg rm)
811d4cf4 896{
9feac1d7 897 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
898}
899
9feac1d7
RH
900static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
901 TCGReg rn, int imm8)
811d4cf4 902{
9feac1d7 903 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
811d4cf4
AZ
904}
905
9feac1d7
RH
906static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
907 TCGReg rn, TCGReg rm)
811d4cf4 908{
9feac1d7 909 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
811d4cf4
AZ
910}
911
811d4cf4
AZ
912static inline void tcg_out_ld32u(TCGContext *s, int cond,
913 int rd, int rn, int32_t offset)
914{
915 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
916 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
917 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
918 } else
919 tcg_out_ld32_12(s, cond, rd, rn, offset);
920}
921
922static inline void tcg_out_st32(TCGContext *s, int cond,
923 int rd, int rn, int32_t offset)
924{
925 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
926 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
927 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
928 } else
929 tcg_out_st32_12(s, cond, rd, rn, offset);
930}
931
932static inline void tcg_out_ld16u(TCGContext *s, int cond,
933 int rd, int rn, int32_t offset)
934{
935 if (offset > 0xff || offset < -0xff) {
4346457a
RH
936 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
937 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
938 } else
939 tcg_out_ld16u_8(s, cond, rd, rn, offset);
940}
941
942static inline void tcg_out_ld16s(TCGContext *s, int cond,
943 int rd, int rn, int32_t offset)
944{
945 if (offset > 0xff || offset < -0xff) {
4346457a
RH
946 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
947 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
948 } else
949 tcg_out_ld16s_8(s, cond, rd, rn, offset);
950}
951
f694a27e 952static inline void tcg_out_st16(TCGContext *s, int cond,
811d4cf4
AZ
953 int rd, int rn, int32_t offset)
954{
955 if (offset > 0xff || offset < -0xff) {
4346457a
RH
956 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
957 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4 958 } else
f694a27e 959 tcg_out_st16_8(s, cond, rd, rn, offset);
811d4cf4
AZ
960}
961
962static inline void tcg_out_ld8u(TCGContext *s, int cond,
963 int rd, int rn, int32_t offset)
964{
965 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
966 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
967 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
968 } else
969 tcg_out_ld8_12(s, cond, rd, rn, offset);
970}
971
972static inline void tcg_out_ld8s(TCGContext *s, int cond,
973 int rd, int rn, int32_t offset)
974{
975 if (offset > 0xff || offset < -0xff) {
4346457a
RH
976 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
977 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
978 } else
979 tcg_out_ld8s_8(s, cond, rd, rn, offset);
980}
981
f694a27e 982static inline void tcg_out_st8(TCGContext *s, int cond,
811d4cf4
AZ
983 int rd, int rn, int32_t offset)
984{
985 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
986 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
987 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
988 } else
989 tcg_out_st8_12(s, cond, rd, rn, offset);
990}
991
/* The _goto case is normally between TBs within the same code buffer,
 * and with the code buffer limited to 16MB we shouldn't need the long
 * case.
 *
 * .... except to the prologue that is in its own buffer.
 */
/* Emit an unconditional-ish jump to the ARM-mode address 'addr' under
   condition 'cond'.  Thumb destinations (bit 0 set) are rejected.  */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    /* Displacement from the current output position.  */
    val = addr - (tcg_target_long) s->code_ptr;
    /* The -8 accounts for the PC reading two instructions ahead on ARM.
       Within B's +/-32MB range, a single branch suffices.  */
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            /* Unconditional long jump: load PC from an inline literal
               placed immediately after the LDR (PC-8+(-4) == next word).  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr);
        } else {
            /* Conditional long jump: add the (pipeline-adjusted)
               displacement to PC through the scratch register.  */
            tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_TMP, SHIFT_IMM_LSL(0));
        }
    }
}
1022
/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
/* Emit a call to 'addr'.  Bit 0 of addr set means a Thumb-mode target,
   which requires BLX (v5T+).  */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    /* -8 compensates for the ARM pipeline PC offset; BL/BLX reach
       +/-32MB.  */
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                /* Pre-v5T cores cannot interwork to Thumb.  */
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else if (use_armv7_instructions) {
        /* movw/movt the full address, then branch-and-link via register.  */
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        /* Long call without BLX: hand-craft the return address in LR
           (PC-8 plus 4 skips the literal word below), then load PC from
           the inline literal.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}
1049
1050static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
1051{
fb822738 1052 if (use_armv5t_instructions) {
23401b58
AJ
1053 tcg_out_blx(s, cond, arg);
1054 } else {
1055 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
1056 TCG_REG_PC, SHIFT_IMM_LSL(0));
1057 tcg_out_bx(s, cond, arg);
1058 }
811d4cf4
AZ
1059}
1060
1061static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
1062{
1063 TCGLabel *l = &s->labels[label_index];
1064
96fbd7de 1065 if (l->has_value) {
811d4cf4 1066 tcg_out_goto(s, cond, l->u.value);
811d4cf4 1067 } else {
811d4cf4 1068 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
e936243a 1069 tcg_out_b_noaddr(s, cond);
811d4cf4
AZ
1070 }
1071}
1072
#ifdef CONFIG_SOFTMMU

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Slow-path load helpers, indexed by access size: 0=byte, 1=halfword,
   2=word, 3=doubleword (i.e. opc & 3).  */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
/* Slow-path store helpers, same size indexing as qemu_ld_helpers.  */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
/* MOV_ARG places the value directly in a core register; EXT_ARG (which
   may be empty) converts 'arg' into a register holding the final value
   before it is spilled to the outgoing stack area.  */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);                      \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

/* Immediate argument: loaded with movi32, or materialized in TMP for
   the stack case.  */
DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* 8-bit register argument: zero-extended on the way.  */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* 16-bit register argument: zero-extended on the way.  */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
/* Full 32-bit register argument: plain move, no extension needed.  */
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
1126
1127static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
1128 TCGReg arglo, TCGReg arghi)
9716ef3b
PM
1129{
1130 /* 64 bit arguments must go in even/odd register pairs
1131 * and in 8-aligned stack slots.
1132 */
1133 if (argreg & 1) {
1134 argreg++;
1135 }
1136 argreg = tcg_out_arg_reg32(s, argreg, arglo);
1137 argreg = tcg_out_arg_reg32(s, argreg, arghi);
1138 return argreg;
1139}
811d4cf4 1140
#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* Load and compare a TLB entry, leaving the flags set.  Leaves R2 pointing
   to the tlb entry.  Clobbers R1 and TMP.  */

static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                             int s_bits, int tlb_offset)
{
    TCGReg base = TCG_AREG0;

    /* Should generate something like the following:
     * pre-v7:
     *   shr    tmp, addr_reg, #TARGET_PAGE_BITS                  (1)
     *   add    r2, env, #off & 0xff00
     *   and    r0, tmp, #(CPU_TLB_SIZE - 1)                      (2)
     *   add    r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS               (3)
     *   ldr    r0, [r2, #off & 0xff]!                            (4)
     *   tst    addr_reg, #s_mask
     *   cmpeq  r0, tmp, lsl #TARGET_PAGE_BITS                    (5)
     *
     * v7 (not implemented yet):
     *   ubfx   r2, addr_reg, #TARGET_PAGE_BITS, #CPU_TLB_BITS    (1)
     *   movw   tmp, #~TARGET_PAGE_MASK & ~s_mask
     *   movw   r0, #off
     *   add    r2, env, r2, lsl #CPU_TLB_ENTRY_BITS              (2)
     *   bic    tmp, addr_reg, tmp
     *   ldr    r0, [r2, r0]!                                     (3)
     *   cmp    r0, tmp                                           (4)
     */
    /* The split of tlb_offset into a 0xff00 part and a 0xff part below
       relies on the TLB index fitting 8 bits.  */
#  if CPU_TLB_BITS > 8
#   error
#  endif
    /* TMP = page number of the guest address.  */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
                    0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));

    /* We assume that the offset is contained within 16 bits.  */
    assert((tlb_offset & ~0xffff) == 0);
    if (tlb_offset > 0xff) {
        /* Fold the high byte of the offset into the base register;
           (24 << 7) encodes the immediate rotation for the 0xff00 part.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                        (24 << 7) | (tlb_offset >> 8));
        tlb_offset &= 0xff;
        base = TCG_REG_R2;
    }

    /* R0 = TLB index; R2 = address of the TLB entry.  */
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));

    /* Load the tlb comparator.  Use ldrd if needed and available,
       but due to how the pointer needs setting up, ldm isn't useful.
       Base arm5 doesn't have ldrd, but armv5te does.  */
    if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
        tcg_out_memop_8(s, COND_AL, INSN_LDRD_IMM, TCG_REG_R0,
                        TCG_REG_R2, tlb_offset, 1, 1);
    } else {
        tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R0,
                         TCG_REG_R2, tlb_offset, 1, 1);
        if (TARGET_LONG_BITS == 64) {
            /* High half of the comparator, 4 bytes further on.  */
            tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R1,
                             TCG_REG_R2, 4, 1, 0);
        }
    }

    /* Check alignment. */
    if (s_bits) {
        tcg_out_dat_imm(s, COND_AL, ARITH_TST,
                        0, addrlo, (1 << s_bits) - 1);
    }

    /* Compare the comparator against the page number (conditional on
       the alignment test having passed, when one was emitted).  */
    tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
                    TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                        TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
    }
}
df5e0ef7
RH
1219
1220/* Record the context of a call to the out of line helper code for the slow
1221 path for a load or store, so that we can later generate the correct
1222 helper code. */
1223static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
1224 int data_reg, int data_reg2, int addrlo_reg,
1225 int addrhi_reg, int mem_index,
1226 uint8_t *raddr, uint8_t *label_ptr)
1227{
1228 int idx;
1229 TCGLabelQemuLdst *label;
1230
1231 if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
1232 tcg_abort();
1233 }
1234
1235 idx = s->nb_qemu_ldst_labels++;
1236 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
1237 label->is_ld = is_ld;
1238 label->opc = opc;
1239 label->datalo_reg = data_reg;
1240 label->datahi_reg = data_reg2;
1241 label->addrlo_reg = addrlo_reg;
1242 label->addrhi_reg = addrhi_reg;
1243 label->mem_index = mem_index;
1244 label->raddr = raddr;
1245 label->label_ptr[0] = label_ptr;
1246}
1247
/* Generate the slow path for a guest load: patch the fast-path branch to
   land here, call the MMU helper, move its result into the destination
   register(s), then jump back to straight-line code.  */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, data_reg, data_reg2;
    uint8_t *start;

    /* Resolve the fast path's forward branch to the current position.  */
    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    /* Marshal helper arguments: env, guest address (32 or 64 bit),
       mmu_idx.  */
    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[lb->opc & 3]);

    data_reg = lb->datalo_reg;
    data_reg2 = lb->datahi_reg;

    /* Move/extend the helper's return value (R0, or R0:R1 for 64-bit)
       into the destination, applying sign extension where opc bit 2
       requests it.  */
    start = s->code_ptr;
    switch (lb->opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 3:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
        break;
    }

    /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
       the call and the branch back to straight-line code.  Note that the
       moves above could be elided by register allocation, nor do we know
       which code alternative we chose for extension.  */
    switch (s->code_ptr - start) {
    case 0:
        tcg_out_nop(s);
        /* FALLTHRU */
    case 4:
        tcg_out_nop(s);
        /* FALLTHRU */
    case 8:
        break;
    default:
        /* More than 2 insns emitted above would break GETPC_LDST.  */
        abort();
    }

    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
}
1306
/* Generate the slow path for a guest store: patch the fast-path branch to
   land here, call the MMU store helper with the data value, then jump back
   to straight-line code.  */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, data_reg, data_reg2;

    /* Resolve the fast path's forward branch to the current position.  */
    reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);

    /* Marshal helper arguments: env, guest address (32 or 64 bit).  */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    /* Then the data value, zero-extended to the access size.  */
    data_reg = lb->datalo_reg;
    data_reg2 = lb->datahi_reg;
    switch (lb->opc) {
    case 0:
        argreg = tcg_out_arg_reg8(s, argreg, data_reg);
        break;
    case 1:
        argreg = tcg_out_arg_reg16(s, argreg, data_reg);
        break;
    case 2:
        argreg = tcg_out_arg_reg32(s, argreg, data_reg);
        break;
    case 3:
        argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
        break;
    }

    /* Finally the mmu_idx, then call the size-appropriate helper.  */
    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[lb->opc & 3]);

    /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
       the call and the branch back to straight-line code.  */
    tcg_out_nop(s);
    tcg_out_nop(s);
    tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
}
cee87be8
RH
1347#endif /* SOFTMMU */
1348
/* Emit a guest load.  args is { data_reg [, data_reg2], addr_reg
   [, addr_reg2] [, mem_index] }; opc encodes size in bits 0..1 and
   sign-extension in bit 2.  With CONFIG_SOFTMMU a TLB fast path is
   emitted with a recorded slow-path stub; otherwise a direct access
   (offset by GUEST_BASE) is emitted.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, data_reg2;
    bool bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg addr_reg2;
    uint8_t *label_ptr;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc & 3;

    /* Compare the TLB entry for this address; flags are set on return.  */
    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
                     offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));

    /* On mismatch, branch to the slow path (patched up later).  */
    label_ptr = s->code_ptr;
    tcg_out_b_noaddr(s, COND_NE);

    /* R1 = host addend; R2 still points at the TLB entry's addr_read.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_read));

    /* Fast-path access at [addr_reg + R1], byte-swapping as needed.  */
    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* Load unsigned, swap, then the swap variant sign-extends.  */
            tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* 64-bit: writeback form computes the host address into R1,
           second word loaded at R1+4.  */
        if (bswap) {
            tcg_out_ld32_rwb(s, COND_AL, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_AL, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Record context so the slow-path stub can be generated at TB end.  */
    add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8 bits (+rotation) at a time.  */
        uint32_t offset = GUEST_BASE;
        int i, rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_TMP;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            /* Load the half that clobbers addr_reg last.  */
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
1486
/* Emit a guest store.  Argument layout and opc encoding mirror
   tcg_out_qemu_ld; bit 2 of opc is unused for stores.  With
   CONFIG_SOFTMMU a TLB fast path plus recorded slow-path stub is
   emitted; otherwise a direct GUEST_BASE-relative access.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, data_reg2;
    bool bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
    TCGReg addr_reg2;
    uint8_t *label_ptr;
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif

    data_reg = *args++;
    data_reg2 = (opc == 3 ? *args++ : 0);
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
    addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    mem_index = *args;
    s_bits = opc & 3;

    /* Compare the TLB entry (write comparator this time).  */
    tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
                     offsetof(CPUArchState,
                              tlb_table[mem_index][0].addr_write));

    /* On mismatch, branch to the slow path (patched up later).  */
    label_ptr = s->code_ptr;
    tcg_out_b_noaddr(s, COND_NE);

    /* R1 = host addend for the matched TLB entry.  */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_write));

    /* Fast-path store at [addr_reg + R1]; byte-swapped data is staged
       through R0 so the source register is preserved.  */
    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        /* 64-bit: writeback form computes the host address into R1,
           second word stored at R1+4.  */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Record context so the slow-path stub can be generated at TB end.  */
    add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2,
                        mem_index, s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8 bits (+rotation) at a time.  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1610
/* Address of the epilogue, set up when the prologue is generated;
   exit_tb jumps here.  */
static uint8_t *tb_ret_addr;

/* Emit ARM host code for a single TCG opcode.  args/const_args follow
   the standard TCG backend calling convention; const_args[i] nonzero
   means args[i] is an immediate.  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        if (use_armv7_instructions || check_fit_imm(args[0])) {
            /* Return value fits in a mov; jump to the epilogue.  */
            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
        } else {
            /* Load the return value from a literal placed after the
               goto, back-patching the ldr's offset.  */
            uint8_t *ld_ptr = s->code_ptr;
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
            tcg_out32(s, args[0]);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            /* Load PC from the literal word that follows; the word is
               the patch target.  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            /* Load PC from tb_next[args[0]]; use a PC-relative ldr when
               the slot is within the 12-bit offset range.  */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        /* rIN variants try the negated immediate with the alternate op
           (here SUB) when the immediate doesn't encode directly.  */
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                /* const - reg: reverse subtract.  */
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        /* rIK variants try the inverted immediate with the alternate op
           (here BIC) when the immediate doesn't encode directly.  */
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Avoid clobbering an input of the high-half add by computing
           the low half into TMP first.  */
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* As for add2: keep inputs of the high half alive.  */
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        /* neg == rsb with 0.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* A zero immediate shift count must become LSL #0, since
           LSR #0 encodes a shift of 32 on ARM.  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        /* Implemented as a right rotation by (32 - count).  */
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                       args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[2], const_args[2]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        /* Conditionally set 1, then conditionally set 0 on the inverse.  */
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[2], args[4], const_args[4]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;

    default:
        tcg_abort();
    }
}
1941
#ifdef CONFIG_SOFTMMU
/* Generate TB finalization at the end of a block: emit the out-of-line
   slow paths for every qemu_ld/qemu_st label recorded while translating
   the current translation block.  */
void tcg_out_tb_finalize(TCGContext *s)
{
    int idx, count = s->nb_qemu_ldst_labels;

    for (idx = 0; idx < count; ++idx) {
        TCGLabelQemuLdst *lb = &s->qemu_ldst_labels[idx];

        if (lb->is_ld) {
            tcg_out_qemu_ld_slow_path(s, lb);
        } else {
            tcg_out_qemu_st_slow_path(s, lb);
        }
    }
}
#endif /* SOFTMMU */
1957
811d4cf4
AZ
1958static const TCGTargetOpDef arm_op_defs[] = {
1959 { INDEX_op_exit_tb, { } },
1960 { INDEX_op_goto_tb, { } },
1961 { INDEX_op_call, { "ri" } },
811d4cf4
AZ
1962 { INDEX_op_br, { } },
1963
1964 { INDEX_op_mov_i32, { "r", "r" } },
1965 { INDEX_op_movi_i32, { "r" } },
1966
1967 { INDEX_op_ld8u_i32, { "r", "r" } },
1968 { INDEX_op_ld8s_i32, { "r", "r" } },
1969 { INDEX_op_ld16u_i32, { "r", "r" } },
1970 { INDEX_op_ld16s_i32, { "r", "r" } },
1971 { INDEX_op_ld_i32, { "r", "r" } },
1972 { INDEX_op_st8_i32, { "r", "r" } },
1973 { INDEX_op_st16_i32, { "r", "r" } },
1974 { INDEX_op_st_i32, { "r", "r" } },
1975
1976 /* TODO: "r", "r", "ri" */
a9a86ae9 1977 { INDEX_op_add_i32, { "r", "r", "rIN" } },
d9fda575 1978 { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
811d4cf4
AZ
1979 { INDEX_op_mul_i32, { "r", "r", "r" } },
1980 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
d693e147 1981 { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
19b62bf4
RH
1982 { INDEX_op_and_i32, { "r", "r", "rIK" } },
1983 { INDEX_op_andc_i32, { "r", "r", "rIK" } },
cb4e581f
LD
1984 { INDEX_op_or_i32, { "r", "r", "rI" } },
1985 { INDEX_op_xor_i32, { "r", "r", "rI" } },
650bbb36 1986 { INDEX_op_neg_i32, { "r", "r" } },
f878d2d2 1987 { INDEX_op_not_i32, { "r", "r" } },
811d4cf4
AZ
1988
1989 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1990 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1991 { INDEX_op_sar_i32, { "r", "r", "ri" } },
293579e5
AJ
1992 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1993 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
811d4cf4 1994
5d53b4c9
RH
1995 { INDEX_op_brcond_i32, { "r", "rIN" } },
1996 { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
1997 { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },
811d4cf4 1998
2df3f1ee
RH
1999 { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
2000 { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
5d53b4c9
RH
2001 { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
2002 { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },
811d4cf4 2003
26c5d372 2004#if TARGET_LONG_BITS == 32
67dcab73
AJ
2005 { INDEX_op_qemu_ld8u, { "r", "l" } },
2006 { INDEX_op_qemu_ld8s, { "r", "l" } },
2007 { INDEX_op_qemu_ld16u, { "r", "l" } },
2008 { INDEX_op_qemu_ld16s, { "r", "l" } },
2009 { INDEX_op_qemu_ld32, { "r", "l" } },
2010 { INDEX_op_qemu_ld64, { "L", "L", "l" } },
2011
2012 { INDEX_op_qemu_st8, { "s", "s" } },
2013 { INDEX_op_qemu_st16, { "s", "s" } },
2014 { INDEX_op_qemu_st32, { "s", "s" } },
595b5397 2015 { INDEX_op_qemu_st64, { "s", "s", "s" } },
26c5d372 2016#else
67dcab73
AJ
2017 { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
2018 { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
2019 { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
2020 { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
2021 { INDEX_op_qemu_ld32, { "r", "l", "l" } },
2022 { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },
2023
2024 { INDEX_op_qemu_st8, { "s", "s", "s" } },
2025 { INDEX_op_qemu_st16, { "s", "s", "s" } },
2026 { INDEX_op_qemu_st32, { "s", "s", "s" } },
595b5397 2027 { INDEX_op_qemu_st64, { "s", "s", "s", "s" } },
26c5d372 2028#endif
811d4cf4 2029
244b1e81
AJ
2030 { INDEX_op_bswap16_i32, { "r", "r" } },
2031 { INDEX_op_bswap32_i32, { "r", "r" } },
2032
811d4cf4
AZ
2033 { INDEX_op_ext8s_i32, { "r", "r" } },
2034 { INDEX_op_ext16s_i32, { "r", "r" } },
9517094f 2035 { INDEX_op_ext16u_i32, { "r", "r" } },
811d4cf4 2036
b6b24cb0
RH
2037 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
2038
0637c56c 2039 { INDEX_op_div_i32, { "r", "r", "r" } },
0637c56c 2040 { INDEX_op_divu_i32, { "r", "r", "r" } },
0637c56c 2041
811d4cf4
AZ
2042 { -1 },
2043};
2044
e4d58b41 2045static void tcg_target_init(TCGContext *s)
811d4cf4 2046{
72e1ccfc
RH
2047#if defined(CONFIG_GETAUXVAL) && !defined(use_idiv_instructions)
2048 {
2049 unsigned long hwcap = getauxval(AT_HWCAP);
2050 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2051 }
2052#endif
2053
e4a7d5e8 2054 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
811d4cf4 2055 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
e4a7d5e8
AJ
2056 (1 << TCG_REG_R0) |
2057 (1 << TCG_REG_R1) |
2058 (1 << TCG_REG_R2) |
2059 (1 << TCG_REG_R3) |
2060 (1 << TCG_REG_R12) |
2061 (1 << TCG_REG_R14));
811d4cf4
AZ
2062
2063 tcg_regset_clear(s->reserved_regs);
811d4cf4 2064 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
4346457a 2065 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
e4a7d5e8 2066 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
811d4cf4
AZ
2067
2068 tcg_add_target_add_op_defs(arm_op_defs);
2069}
2070
2a534aff
RH
2071static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2072 TCGReg arg1, tcg_target_long arg2)
811d4cf4
AZ
2073{
2074 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2075}
2076
2a534aff
RH
2077static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2078 TCGReg arg1, tcg_target_long arg2)
811d4cf4
AZ
2079{
2080 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2081}
2082
2a534aff
RH
2083static inline void tcg_out_mov(TCGContext *s, TCGType type,
2084 TCGReg ret, TCGReg arg)
811d4cf4
AZ
2085{
2086 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
2087}
2088
2089static inline void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 2090 TCGReg ret, tcg_target_long arg)
811d4cf4
AZ
2091{
2092 tcg_out_movi32(s, COND_AL, ret, arg);
2093}
2094
e4d58b41 2095static void tcg_target_qemu_prologue(TCGContext *s)
811d4cf4 2096{
fc4d60ee
RH
2097 int frame_size;
2098
2099 /* Calling convention requires us to save r4-r11 and lr. */
2100 /* stmdb sp!, { r4 - r11, lr } */
2101 tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
cea5f9a2 2102
fc4d60ee
RH
2103 /* Allocate the local stack frame. */
2104 frame_size = TCG_STATIC_CALL_ARGS_SIZE;
2105 frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
2106 /* We saved an odd number of registers above; keep an 8 aligned stack. */
2107 frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
2108 & -TCG_TARGET_STACK_ALIGN) + 4;
2109
2110 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2111 TCG_REG_CALL_STACK, frame_size, 1);
2112 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2113 CPU_TEMP_BUF_NLONGS * sizeof(long));
4e17eae9 2114
cea5f9a2 2115 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
811d4cf4 2116
cea5f9a2 2117 tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
811d4cf4
AZ
2118 tb_ret_addr = s->code_ptr;
2119
fc4d60ee
RH
2120 /* Epilogue. We branch here via tb_ret_addr. */
2121 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
2122 TCG_REG_CALL_STACK, frame_size, 1);
2123
2124 /* ldmia sp!, { r4 - r11, pc } */
2125 tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
811d4cf4 2126}