]> git.proxmox.com Git - qemu.git/blame - tcg/arm/tcg-target.c
tcg-arm: Cleanup multiply subroutines
[qemu.git] / tcg / arm / tcg-target.c
CommitLineData
811d4cf4
AZ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
d4a9eb1f 24
ac34fb5c
AJ
/* Detect the host ARM architecture revision from the compiler's
   predefined macros, then expose it as plain C flags so runtime code
   can test the ISA level without further preprocessor conditionals.  */
#if defined(__ARM_ARCH_7__) || \
    defined(__ARM_ARCH_7A__) || \
    defined(__ARM_ARCH_7EM__) || \
    defined(__ARM_ARCH_7M__) || \
    defined(__ARM_ARCH_7R__)
#define USE_ARMV7_INSTRUCTIONS
#endif

/* ARMv7 implies ARMv6. */
#if defined(USE_ARMV7_INSTRUCTIONS) || \
    defined(__ARM_ARCH_6J__) || \
    defined(__ARM_ARCH_6K__) || \
    defined(__ARM_ARCH_6T2__) || \
    defined(__ARM_ARCH_6Z__) || \
    defined(__ARM_ARCH_6ZK__)
#define USE_ARMV6_INSTRUCTIONS
#endif

/* ARMv6 implies ARMv5. */
#if defined(USE_ARMV6_INSTRUCTIONS) || \
    defined(__ARM_ARCH_5T__) || \
    defined(__ARM_ARCH_5TE__) || \
    defined(__ARM_ARCH_5TEJ__)
#define USE_ARMV5_INSTRUCTIONS
#endif

#ifdef USE_ARMV5_INSTRUCTIONS
static const int use_armv5_instructions = 1;
#else
static const int use_armv5_instructions = 0;
#endif
#undef USE_ARMV5_INSTRUCTIONS

#ifdef USE_ARMV6_INSTRUCTIONS
static const int use_armv6_instructions = 1;
#else
static const int use_armv6_instructions = 0;
#endif
#undef USE_ARMV6_INSTRUCTIONS

#ifdef USE_ARMV7_INSTRUCTIONS
static const int use_armv7_instructions = 1;
#else
static const int use_armv7_instructions = 0;
#endif
#undef USE_ARMV7_INSTRUCTIONS
d4a9eb1f
BS
70#ifndef NDEBUG
71static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
811d4cf4
AZ
72 "%r0",
73 "%r1",
74 "%r2",
75 "%r3",
76 "%r4",
77 "%r5",
78 "%r6",
79 "%r7",
80 "%r8",
81 "%r9",
82 "%r10",
83 "%r11",
84 "%r12",
85 "%r13",
86 "%r14",
e4a7d5e8 87 "%pc",
811d4cf4 88};
d4a9eb1f 89#endif
811d4cf4 90
d4a9eb1f 91static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
92 TCG_REG_R4,
93 TCG_REG_R5,
94 TCG_REG_R6,
95 TCG_REG_R7,
96 TCG_REG_R8,
97 TCG_REG_R9,
98 TCG_REG_R10,
99 TCG_REG_R11,
811d4cf4 100 TCG_REG_R13,
914ccf51
AJ
101 TCG_REG_R0,
102 TCG_REG_R1,
103 TCG_REG_R2,
104 TCG_REG_R3,
105 TCG_REG_R12,
811d4cf4
AZ
106 TCG_REG_R14,
107};
108
d4a9eb1f 109static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
110 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111};
d4a9eb1f 112static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
113 TCG_REG_R0, TCG_REG_R1
114};
115
13dd6fb9 116#define TCG_REG_TMP TCG_REG_R12
4346457a 117
c69806ab
AJ
118static inline void reloc_abs32(void *code_ptr, tcg_target_long target)
119{
120 *(uint32_t *) code_ptr = target;
121}
122
123static inline void reloc_pc24(void *code_ptr, tcg_target_long target)
124{
125 uint32_t offset = ((target - ((tcg_target_long) code_ptr + 8)) >> 2);
126
127 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
128 | (offset & 0xffffff);
129}
130
650bbb36 131static void patch_reloc(uint8_t *code_ptr, int type,
811d4cf4
AZ
132 tcg_target_long value, tcg_target_long addend)
133{
134 switch (type) {
135 case R_ARM_ABS32:
c69806ab 136 reloc_abs32(code_ptr, value);
811d4cf4
AZ
137 break;
138
139 case R_ARM_CALL:
140 case R_ARM_JUMP24:
141 default:
142 tcg_abort();
143
144 case R_ARM_PC24:
c69806ab 145 reloc_pc24(code_ptr, value);
811d4cf4
AZ
146 break;
147 }
148}
149
b6b24cb0
RH
150#define TCG_CT_CONST_ARM 0x100
151#define TCG_CT_CONST_INV 0x200
152#define TCG_CT_CONST_NEG 0x400
153#define TCG_CT_CONST_ZERO 0x800
19b62bf4 154
811d4cf4 155/* parse target specific constraints */
d4a9eb1f 156static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
811d4cf4
AZ
157{
158 const char *ct_str;
159
160 ct_str = *pct_str;
161 switch (ct_str[0]) {
cb4e581f 162 case 'I':
19b62bf4
RH
163 ct->ct |= TCG_CT_CONST_ARM;
164 break;
165 case 'K':
166 ct->ct |= TCG_CT_CONST_INV;
167 break;
a9a86ae9
RH
168 case 'N': /* The gcc constraint letter is L, already used here. */
169 ct->ct |= TCG_CT_CONST_NEG;
170 break;
b6b24cb0
RH
171 case 'Z':
172 ct->ct |= TCG_CT_CONST_ZERO;
173 break;
cb4e581f 174
811d4cf4 175 case 'r':
811d4cf4
AZ
176 ct->ct |= TCG_CT_REG;
177 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
178 break;
179
67dcab73
AJ
180 /* qemu_ld address */
181 case 'l':
811d4cf4
AZ
182 ct->ct |= TCG_CT_REG;
183 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73
AJ
184#ifdef CONFIG_SOFTMMU
185 /* r0 and r1 will be overwritten when reading the tlb entry,
186 so don't use these. */
811d4cf4
AZ
187 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
188 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
89c33337 189#if TARGET_LONG_BITS == 64
9716ef3b
PM
190 /* If we're passing env to the helper as r0 and need a regpair
191 * for the address then r2 will be overwritten as we're setting
192 * up the args to the helper.
193 */
194 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
195#endif
67dcab73 196#endif
811d4cf4 197 break;
67dcab73 198 case 'L':
d0660ed4
AZ
199 ct->ct |= TCG_CT_REG;
200 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73
AJ
201#ifdef CONFIG_SOFTMMU
202 /* r1 is still needed to load data_reg or data_reg2,
203 so don't use it. */
d0660ed4 204 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
67dcab73 205#endif
d0660ed4
AZ
206 break;
207
67dcab73
AJ
208 /* qemu_st address & data_reg */
209 case 's':
811d4cf4
AZ
210 ct->ct |= TCG_CT_REG;
211 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73
AJ
212 /* r0 and r1 will be overwritten when reading the tlb entry
213 (softmmu only) and doing the byte swapping, so don't
214 use these. */
811d4cf4
AZ
215 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
216 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
89c33337 217#if defined(CONFIG_SOFTMMU) && (TARGET_LONG_BITS == 64)
9716ef3b
PM
218 /* Avoid clashes with registers being used for helper args */
219 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
220 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
221#endif
811d4cf4 222 break;
67dcab73
AJ
223 /* qemu_st64 data_reg2 */
224 case 'S':
811d4cf4
AZ
225 ct->ct |= TCG_CT_REG;
226 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
67dcab73
AJ
227 /* r0 and r1 will be overwritten when reading the tlb entry
228 (softmmu only) and doing the byte swapping, so don't
229 use these. */
811d4cf4 230 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
811d4cf4 231 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
67dcab73
AJ
232#ifdef CONFIG_SOFTMMU
233 /* r2 is still needed to load data_reg, so don't use it. */
234 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
89c33337 235#if TARGET_LONG_BITS == 64
9716ef3b
PM
236 /* Avoid clashes with registers being used for helper args */
237 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
238#endif
811d4cf4 239#endif
67dcab73 240 break;
811d4cf4 241
811d4cf4
AZ
242 default:
243 return -1;
244 }
245 ct_str++;
246 *pct_str = ct_str;
247
248 return 0;
249}
250
/* Rotate VAL left by N bits, 0 <= N < 32.
   The original wrote (val >> (32 - n)), which shifts by 32 when n == 0 —
   undefined behavior in C.  n == 0 does occur: encode_imm() returns 0 for
   immediates that fit in 8 bits, and tcg_out_movi32() feeds that result
   straight back into rotl().  The "-n & 31" form gives the same result
   for 1..31 and a well-defined identity rotation for n == 0.  */
static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << (n & 31)) | (val >> (-n & 31));
}
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30.
   Return the rotation (an even count) needed to encode IMM, or -1 if
   IMM has no such encoding.  */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0) {
        return 0;
    }
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0) {
        return 32 - shift;
    }
    /* now try harder with rotations, for values that wrap around
       the top of the word */
    if ((rotl(imm, 2) & ~0xff) == 0) {
        return 2;
    }
    if ((rotl(imm, 4) & ~0xff) == 0) {
        return 4;
    }
    if ((rotl(imm, 6) & ~0xff) == 0) {
        return 6;
    }
    /* imm can't be encoded */
    return -1;
}
/* True iff IMM is representable as an ARM ALU immediate. */
static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}
284
811d4cf4
AZ
285/* Test if a constant matches the constraint.
286 * TODO: define constraints for:
287 *
288 * ldr/str offset: between -0xfff and 0xfff
289 * ldrh/strh offset: between -0xff and 0xff
290 * mov operand2: values represented with x << (2 * y), x < 0x100
291 * add, sub, eor...: ditto
292 */
293static inline int tcg_target_const_match(tcg_target_long val,
19b62bf4 294 const TCGArgConstraint *arg_ct)
811d4cf4
AZ
295{
296 int ct;
297 ct = arg_ct->ct;
19b62bf4 298 if (ct & TCG_CT_CONST) {
811d4cf4 299 return 1;
19b62bf4 300 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
cb4e581f 301 return 1;
19b62bf4
RH
302 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
303 return 1;
a9a86ae9
RH
304 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
305 return 1;
b6b24cb0
RH
306 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
307 return 1;
19b62bf4 308 } else {
811d4cf4 309 return 0;
19b62bf4 310 }
811d4cf4
AZ
311}
312
/* Bit 20 of a data-processing instruction requests CPSR flag update. */
#define TO_CPSR (1 << 20)

/* Data-processing opcodes, pre-shifted into bits 24:21 of the
   instruction word.  The compare/test ops always set flags.  */
enum arm_data_opc_e {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,
};
332
/* Operand-2 shifter encodings: immediate shift amount in bits 11:7,
   or a shift by register RS in bits 11:8.  */
#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)

/* ARM condition codes (instruction bits 31:28). */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,	/* Always */
};
359
0aed257f 360static const uint8_t tcg_cond_to_arm_cond[] = {
811d4cf4
AZ
361 [TCG_COND_EQ] = COND_EQ,
362 [TCG_COND_NE] = COND_NE,
363 [TCG_COND_LT] = COND_LT,
364 [TCG_COND_GE] = COND_GE,
365 [TCG_COND_LE] = COND_LE,
366 [TCG_COND_GT] = COND_GT,
367 /* unsigned */
368 [TCG_COND_LTU] = COND_CC,
369 [TCG_COND_GEU] = COND_CS,
370 [TCG_COND_LEU] = COND_LS,
371 [TCG_COND_GTU] = COND_HI,
372};
373
374static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
375{
376 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
377}
378
379static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
380{
381 tcg_out32(s, (cond << 28) | 0x0a000000 |
382 (((offset - 8) >> 2) & 0x00ffffff));
383}
384
e936243a
AZ
385static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
386{
56779034
AJ
387 /* We pay attention here to not modify the branch target by skipping
388 the corresponding bytes. This ensure that caches and memory are
389 kept coherent during retranslation. */
e2542fe2 390#ifdef HOST_WORDS_BIGENDIAN
e936243a
AZ
391 tcg_out8(s, (cond << 4) | 0x0a);
392 s->code_ptr += 3;
393#else
394 s->code_ptr += 3;
395 tcg_out8(s, (cond << 4) | 0x0a);
396#endif
397}
398
811d4cf4
AZ
399static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
400{
401 tcg_out32(s, (cond << 28) | 0x0b000000 |
402 (((offset - 8) >> 2) & 0x00ffffff));
403}
404
23401b58
AJ
405static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
406{
407 tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
408}
409
24e838b7
PM
410static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
411{
412 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
413 (((offset - 8) >> 2) & 0x00ffffff));
414}
415
811d4cf4
AZ
416static inline void tcg_out_dat_reg(TCGContext *s,
417 int cond, int opc, int rd, int rn, int rm, int shift)
418{
2df3f1ee 419 tcg_out32(s, (cond << 28) | (0 << 25) | opc |
811d4cf4
AZ
420 (rn << 16) | (rd << 12) | shift | rm);
421}
422
9716ef3b
PM
423static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
424{
425 /* Simple reg-reg move, optimising out the 'do nothing' case */
426 if (rd != rm) {
427 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
428 }
429}
430
811d4cf4
AZ
431static inline void tcg_out_dat_imm(TCGContext *s,
432 int cond, int opc, int rd, int rn, int im)
433{
2df3f1ee 434 tcg_out32(s, (cond << 28) | (1 << 25) | opc |
811d4cf4
AZ
435 (rn << 16) | (rd << 12) | im);
436}
437
e86e0f28 438static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
811d4cf4 439{
e86e0f28
RH
440 int rot, opc, rn;
441
442 /* For armv7, make sure not to use movw+movt when mov/mvn would do.
443 Speed things up by only checking when movt would be required.
444 Prior to armv7, have one go at fully rotated immediates before
445 doing the decomposition thing below. */
446 if (!use_armv7_instructions || (arg & 0xffff0000)) {
447 rot = encode_imm(arg);
448 if (rot >= 0) {
449 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
450 rotl(arg, rot) | (rot << 7));
451 return;
452 }
453 rot = encode_imm(~arg);
454 if (rot >= 0) {
455 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
456 rotl(~arg, rot) | (rot << 7));
457 return;
458 }
459 }
460
461 /* Use movw + movt. */
462 if (use_armv7_instructions) {
ac34fb5c
AJ
463 /* movw */
464 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
465 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
0f11f25a 466 if (arg & 0xffff0000) {
ac34fb5c
AJ
467 /* movt */
468 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
469 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
ac34fb5c 470 }
e86e0f28
RH
471 return;
472 }
0f11f25a 473
e86e0f28
RH
474 /* TODO: This is very suboptimal, we can easily have a constant
475 pool somewhere after all the instructions. */
476 opc = ARITH_MOV;
477 rn = 0;
478 /* If we have lots of leading 1's, we can shorten the sequence by
479 beginning with mvn and then clearing higher bits with eor. */
480 if (clz32(~arg) > clz32(arg)) {
481 opc = ARITH_MVN, arg = ~arg;
0f11f25a 482 }
e86e0f28
RH
483 do {
484 int i = ctz32(arg) & ~1;
485 rot = ((32 - i) << 7) & 0xf00;
486 tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
487 arg &= ~(0xff << i);
488
489 opc = ARITH_EOR;
490 rn = rd;
491 } while (arg);
811d4cf4
AZ
492}
493
7fc645bf
PM
494static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
495 TCGArg lhs, TCGArg rhs, int rhs_is_const)
496{
497 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
498 * rhs must satisfy the "rI" constraint.
499 */
500 if (rhs_is_const) {
501 int rot = encode_imm(rhs);
502 assert(rot >= 0);
503 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
504 } else {
505 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
506 }
507}
508
19b62bf4
RH
509static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
510 TCGReg dst, TCGReg lhs, TCGArg rhs,
511 bool rhs_is_const)
512{
513 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
514 * rhs must satisfy the "rIK" constraint.
515 */
516 if (rhs_is_const) {
517 int rot = encode_imm(rhs);
518 if (rot < 0) {
519 rhs = ~rhs;
520 rot = encode_imm(rhs);
521 assert(rot >= 0);
522 opc = opinv;
523 }
524 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
525 } else {
526 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
527 }
528}
529
a9a86ae9
RH
530static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
531 TCGArg dst, TCGArg lhs, TCGArg rhs,
532 bool rhs_is_const)
533{
534 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
535 * rhs must satisfy the "rIN" constraint.
536 */
537 if (rhs_is_const) {
538 int rot = encode_imm(rhs);
539 if (rot < 0) {
540 rhs = -rhs;
541 rot = encode_imm(rhs);
542 assert(rot >= 0);
543 opc = opneg;
544 }
545 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
546 } else {
547 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
548 }
549}
550
34358a12
RH
551static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
552 TCGReg rn, TCGReg rm)
811d4cf4 553{
34358a12
RH
554 /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
555 if (!use_armv6_instructions && rd == rn) {
556 if (rd == rm) {
557 /* rd == rn == rm; copy an input to tmp first. */
558 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
559 rm = rn = TCG_REG_TMP;
560 } else {
561 rn = rm;
562 rm = rd;
563 }
811d4cf4 564 }
34358a12
RH
565 /* mul */
566 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
811d4cf4
AZ
567}
568
34358a12
RH
569static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
570 TCGReg rd1, TCGReg rn, TCGReg rm)
811d4cf4 571{
34358a12
RH
572 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
573 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
574 if (rd0 == rm || rd1 == rm) {
575 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
576 rn = TCG_REG_TMP;
577 } else {
578 TCGReg t = rn;
579 rn = rm;
580 rm = t;
581 }
811d4cf4 582 }
34358a12
RH
583 /* umull */
584 tcg_out32(s, (cond << 28) | 0x00800090 |
585 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
811d4cf4
AZ
586}
587
34358a12
RH
588static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
589 TCGReg rd1, TCGReg rn, TCGReg rm)
811d4cf4 590{
34358a12
RH
591 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
592 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
593 if (rd0 == rm || rd1 == rm) {
594 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
595 rn = TCG_REG_TMP;
596 } else {
597 TCGReg t = rn;
598 rn = rm;
599 rm = t;
600 }
811d4cf4 601 }
34358a12
RH
602 /* smull */
603 tcg_out32(s, (cond << 28) | 0x00c00090 |
604 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
811d4cf4
AZ
605}
606
0637c56c
RH
607static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
608{
609 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
610}
611
612static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
613{
614 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
615}
616
9517094f
AJ
617static inline void tcg_out_ext8s(TCGContext *s, int cond,
618 int rd, int rn)
619{
620 if (use_armv6_instructions) {
621 /* sxtb */
622 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
623 } else {
e23886a9 624 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 625 rd, 0, rn, SHIFT_IMM_LSL(24));
e23886a9 626 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
627 rd, 0, rd, SHIFT_IMM_ASR(24));
628 }
629}
630
e854b6d3
AJ
631static inline void tcg_out_ext8u(TCGContext *s, int cond,
632 int rd, int rn)
633{
634 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
635}
636
9517094f
AJ
637static inline void tcg_out_ext16s(TCGContext *s, int cond,
638 int rd, int rn)
639{
640 if (use_armv6_instructions) {
641 /* sxth */
642 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
643 } else {
e23886a9 644 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 645 rd, 0, rn, SHIFT_IMM_LSL(16));
e23886a9 646 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
647 rd, 0, rd, SHIFT_IMM_ASR(16));
648 }
649}
650
651static inline void tcg_out_ext16u(TCGContext *s, int cond,
652 int rd, int rn)
653{
654 if (use_armv6_instructions) {
655 /* uxth */
656 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
657 } else {
e23886a9 658 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f 659 rd, 0, rn, SHIFT_IMM_LSL(16));
e23886a9 660 tcg_out_dat_reg(s, cond, ARITH_MOV,
9517094f
AJ
661 rd, 0, rd, SHIFT_IMM_LSR(16));
662 }
663}
664
67dcab73
AJ
665static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
666{
667 if (use_armv6_instructions) {
668 /* revsh */
669 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
670 } else {
671 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 672 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
67dcab73 673 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 674 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
67dcab73 675 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 676 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
67dcab73
AJ
677 }
678}
679
244b1e81
AJ
680static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
681{
682 if (use_armv6_instructions) {
683 /* rev16 */
684 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
685 } else {
686 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 687 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
244b1e81 688 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a 689 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
244b1e81 690 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 691 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
244b1e81
AJ
692 }
693}
694
7aab08aa
AJ
695/* swap the two low bytes assuming that the two high input bytes and the
696 two high output bit can hold any value. */
697static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
698{
699 if (use_armv6_instructions) {
700 /* rev16 */
701 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
702 } else {
703 tcg_out_dat_reg(s, cond, ARITH_MOV,
4346457a
RH
704 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
705 tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
7aab08aa 706 tcg_out_dat_reg(s, cond, ARITH_ORR,
4346457a 707 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
7aab08aa
AJ
708 }
709}
710
244b1e81
AJ
711static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
712{
713 if (use_armv6_instructions) {
714 /* rev */
715 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
716 } else {
717 tcg_out_dat_reg(s, cond, ARITH_EOR,
4346457a 718 TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
244b1e81 719 tcg_out_dat_imm(s, cond, ARITH_BIC,
4346457a 720 TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
244b1e81
AJ
721 tcg_out_dat_reg(s, cond, ARITH_MOV,
722 rd, 0, rn, SHIFT_IMM_ROR(8));
723 tcg_out_dat_reg(s, cond, ARITH_EOR,
4346457a 724 rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
244b1e81
AJ
725 }
726}
727
b6b24cb0
RH
728bool tcg_target_deposit_valid(int ofs, int len)
729{
730 /* ??? Without bfi, we could improve over generic code by combining
731 the right-shift from a non-zero ofs with the orr. We do run into
732 problems when rd == rs, and the mask generated from ofs+len doesn't
733 fit into an immediate. We would have to be careful not to pessimize
734 wrt the optimizations performed on the expanded code. */
735 return use_armv7_instructions;
736}
737
738static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
739 TCGArg a1, int ofs, int len, bool const_a1)
740{
741 if (const_a1) {
742 /* bfi becomes bfc with rn == 15. */
743 a1 = 15;
744 }
745 /* bfi/bfc */
746 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
747 | (ofs << 7) | ((ofs + len - 1) << 16));
748}
749
811d4cf4
AZ
750static inline void tcg_out_ld32_12(TCGContext *s, int cond,
751 int rd, int rn, tcg_target_long im)
752{
753 if (im >= 0)
754 tcg_out32(s, (cond << 28) | 0x05900000 |
755 (rn << 16) | (rd << 12) | (im & 0xfff));
756 else
757 tcg_out32(s, (cond << 28) | 0x05100000 |
758 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
759}
760
d17bd1d8
AJ
761/* Offset pre-increment with base writeback. */
762static inline void tcg_out_ld32_12wb(TCGContext *s, int cond,
763 int rd, int rn, tcg_target_long im)
764{
765 /* ldr with writeback and both register equals is UNPREDICTABLE */
766 assert(rd != rn);
767
768 if (im >= 0) {
769 tcg_out32(s, (cond << 28) | 0x05b00000 |
770 (rn << 16) | (rd << 12) | (im & 0xfff));
771 } else {
772 tcg_out32(s, (cond << 28) | 0x05300000 |
773 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
774 }
775}
776
811d4cf4
AZ
777static inline void tcg_out_st32_12(TCGContext *s, int cond,
778 int rd, int rn, tcg_target_long im)
779{
780 if (im >= 0)
781 tcg_out32(s, (cond << 28) | 0x05800000 |
782 (rn << 16) | (rd << 12) | (im & 0xfff));
783 else
784 tcg_out32(s, (cond << 28) | 0x05000000 |
785 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
786}
787
788static inline void tcg_out_ld32_r(TCGContext *s, int cond,
789 int rd, int rn, int rm)
790{
791 tcg_out32(s, (cond << 28) | 0x07900000 |
792 (rn << 16) | (rd << 12) | rm);
793}
794
795static inline void tcg_out_st32_r(TCGContext *s, int cond,
796 int rd, int rn, int rm)
797{
798 tcg_out32(s, (cond << 28) | 0x07800000 |
799 (rn << 16) | (rd << 12) | rm);
800}
801
3979144c
PB
802/* Register pre-increment with base writeback. */
803static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
804 int rd, int rn, int rm)
805{
806 tcg_out32(s, (cond << 28) | 0x07b00000 |
807 (rn << 16) | (rd << 12) | rm);
808}
809
810static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
811 int rd, int rn, int rm)
812{
813 tcg_out32(s, (cond << 28) | 0x07a00000 |
814 (rn << 16) | (rd << 12) | rm);
815}
816
811d4cf4
AZ
817static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
818 int rd, int rn, tcg_target_long im)
819{
820 if (im >= 0)
821 tcg_out32(s, (cond << 28) | 0x01d000b0 |
822 (rn << 16) | (rd << 12) |
823 ((im & 0xf0) << 4) | (im & 0xf));
824 else
825 tcg_out32(s, (cond << 28) | 0x015000b0 |
826 (rn << 16) | (rd << 12) |
827 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
828}
829
f694a27e 830static inline void tcg_out_st16_8(TCGContext *s, int cond,
811d4cf4
AZ
831 int rd, int rn, tcg_target_long im)
832{
833 if (im >= 0)
834 tcg_out32(s, (cond << 28) | 0x01c000b0 |
835 (rn << 16) | (rd << 12) |
836 ((im & 0xf0) << 4) | (im & 0xf));
837 else
838 tcg_out32(s, (cond << 28) | 0x014000b0 |
839 (rn << 16) | (rd << 12) |
840 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
841}
842
843static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
844 int rd, int rn, int rm)
845{
846 tcg_out32(s, (cond << 28) | 0x019000b0 |
847 (rn << 16) | (rd << 12) | rm);
848}
849
f694a27e 850static inline void tcg_out_st16_r(TCGContext *s, int cond,
811d4cf4
AZ
851 int rd, int rn, int rm)
852{
853 tcg_out32(s, (cond << 28) | 0x018000b0 |
854 (rn << 16) | (rd << 12) | rm);
855}
856
857static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
858 int rd, int rn, tcg_target_long im)
859{
860 if (im >= 0)
861 tcg_out32(s, (cond << 28) | 0x01d000f0 |
862 (rn << 16) | (rd << 12) |
863 ((im & 0xf0) << 4) | (im & 0xf));
864 else
865 tcg_out32(s, (cond << 28) | 0x015000f0 |
866 (rn << 16) | (rd << 12) |
867 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
868}
869
811d4cf4
AZ
870static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
871 int rd, int rn, int rm)
872{
873 tcg_out32(s, (cond << 28) | 0x019000f0 |
874 (rn << 16) | (rd << 12) | rm);
875}
876
811d4cf4
AZ
877static inline void tcg_out_ld8_12(TCGContext *s, int cond,
878 int rd, int rn, tcg_target_long im)
879{
880 if (im >= 0)
881 tcg_out32(s, (cond << 28) | 0x05d00000 |
882 (rn << 16) | (rd << 12) | (im & 0xfff));
883 else
884 tcg_out32(s, (cond << 28) | 0x05500000 |
885 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
886}
887
888static inline void tcg_out_st8_12(TCGContext *s, int cond,
889 int rd, int rn, tcg_target_long im)
890{
891 if (im >= 0)
892 tcg_out32(s, (cond << 28) | 0x05c00000 |
893 (rn << 16) | (rd << 12) | (im & 0xfff));
894 else
895 tcg_out32(s, (cond << 28) | 0x05400000 |
896 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
897}
898
899static inline void tcg_out_ld8_r(TCGContext *s, int cond,
900 int rd, int rn, int rm)
901{
902 tcg_out32(s, (cond << 28) | 0x07d00000 |
903 (rn << 16) | (rd << 12) | rm);
904}
905
906static inline void tcg_out_st8_r(TCGContext *s, int cond,
907 int rd, int rn, int rm)
908{
909 tcg_out32(s, (cond << 28) | 0x07c00000 |
910 (rn << 16) | (rd << 12) | rm);
911}
912
913static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
914 int rd, int rn, tcg_target_long im)
915{
916 if (im >= 0)
917 tcg_out32(s, (cond << 28) | 0x01d000d0 |
918 (rn << 16) | (rd << 12) |
919 ((im & 0xf0) << 4) | (im & 0xf));
920 else
921 tcg_out32(s, (cond << 28) | 0x015000d0 |
922 (rn << 16) | (rd << 12) |
923 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
924}
925
811d4cf4
AZ
926static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
927 int rd, int rn, int rm)
928{
204c1674 929 tcg_out32(s, (cond << 28) | 0x019000d0 |
811d4cf4
AZ
930 (rn << 16) | (rd << 12) | rm);
931}
932
811d4cf4
AZ
933static inline void tcg_out_ld32u(TCGContext *s, int cond,
934 int rd, int rn, int32_t offset)
935{
936 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
937 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
938 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
939 } else
940 tcg_out_ld32_12(s, cond, rd, rn, offset);
941}
942
943static inline void tcg_out_st32(TCGContext *s, int cond,
944 int rd, int rn, int32_t offset)
945{
946 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
947 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
948 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
949 } else
950 tcg_out_st32_12(s, cond, rd, rn, offset);
951}
952
953static inline void tcg_out_ld16u(TCGContext *s, int cond,
954 int rd, int rn, int32_t offset)
955{
956 if (offset > 0xff || offset < -0xff) {
4346457a
RH
957 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
958 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
959 } else
960 tcg_out_ld16u_8(s, cond, rd, rn, offset);
961}
962
963static inline void tcg_out_ld16s(TCGContext *s, int cond,
964 int rd, int rn, int32_t offset)
965{
966 if (offset > 0xff || offset < -0xff) {
4346457a
RH
967 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
968 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
969 } else
970 tcg_out_ld16s_8(s, cond, rd, rn, offset);
971}
972
f694a27e 973static inline void tcg_out_st16(TCGContext *s, int cond,
811d4cf4
AZ
974 int rd, int rn, int32_t offset)
975{
976 if (offset > 0xff || offset < -0xff) {
4346457a
RH
977 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
978 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4 979 } else
f694a27e 980 tcg_out_st16_8(s, cond, rd, rn, offset);
811d4cf4
AZ
981}
982
983static inline void tcg_out_ld8u(TCGContext *s, int cond,
984 int rd, int rn, int32_t offset)
985{
986 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
987 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
988 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
989 } else
990 tcg_out_ld8_12(s, cond, rd, rn, offset);
991}
992
993static inline void tcg_out_ld8s(TCGContext *s, int cond,
994 int rd, int rn, int32_t offset)
995{
996 if (offset > 0xff || offset < -0xff) {
4346457a
RH
997 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
998 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
999 } else
1000 tcg_out_ld8s_8(s, cond, rd, rn, offset);
1001}
1002
f694a27e 1003static inline void tcg_out_st8(TCGContext *s, int cond,
811d4cf4
AZ
1004 int rd, int rn, int32_t offset)
1005{
1006 if (offset > 0xfff || offset < -0xfff) {
4346457a
RH
1007 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
1008 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
811d4cf4
AZ
1009 } else
1010 tcg_out_st8_12(s, cond, rd, rn, offset);
1011}
1012
/* The _goto case is normally between TBs within the same code buffer,
 * and with the code buffer limited to 16MB we shouldn't need the long
 * case.
 *
 * .... except to the prologue that is in its own buffer.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t val;

    if (addr & 1) {
        /* goto to a Thumb destination isn't supported */
        tcg_abort();
    }

    /* Displacement relative to the current output position.  The branch
       encoding is PC-relative with PC reading as insn address + 8, hence
       the -8 adjustments below.  */
    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
        tcg_out_b(s, cond, val);
    else {
        if (cond == COND_AL) {
            /* Out of direct branch range: load the absolute target into PC
               from a literal word emitted immediately after the load
               (PC-4 accounts for the pipeline offset).  */
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            tcg_out32(s, addr);
        } else {
            /* Conditional long jump: materialize the (pipeline-adjusted)
               displacement and add it to PC under the same condition.  */
            tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
            tcg_out_dat_reg(s, cond, ARITH_ADD,
                            TCG_REG_PC, TCG_REG_PC,
                            TCG_REG_TMP, SHIFT_IMM_LSL(0));
        }
    }
}
1043
222f23f5
DDAG
/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5_instructions) {
                /* No ARM/Thumb interworking call before ARMv5.  */
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else {
        /* Beyond BL range: build the return address in LR by hand
           (PC + 4 skips the two following words), then jump via a
           literal pool word placed right after the load.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}
1067
1068static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
1069{
23401b58
AJ
1070 if (use_armv5_instructions) {
1071 tcg_out_blx(s, cond, arg);
1072 } else {
1073 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
1074 TCG_REG_PC, SHIFT_IMM_LSL(0));
1075 tcg_out_bx(s, cond, arg);
1076 }
811d4cf4
AZ
1077}
1078
/* Emit a (possibly conditional) jump to a TCG label.  If the label has
   already been resolved this is a plain goto; otherwise a relocation is
   recorded so the branch can be patched once the label is placed.  */
static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out_goto(s, cond, l->u.value);
    else if (cond == COND_AL) {
        /* Unconditional: jump via a literal pool word; the reloc fills in
           the absolute address later (31337 is a dummy addend).  */
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
        s->code_ptr += 4;
    } else {
        /* Probably this should be preferred even for COND_AL... */
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
1095
811d4cf4 1096#ifdef CONFIG_SOFTMMU
79383c9c 1097
022c62cb 1098#include "exec/softmmu_defs.h"
811d4cf4 1099
e141ab52
BS
/* Softmmu slow-path helpers, indexed by log2 of the access size
   (0 = byte, 1 = 16-bit, 2 = 32-bit, 3 = 64-bit).  */

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
9716ef3b
PM
1117
/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argreg 0..3 is real registers, 4+ on stack.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
/* MOV_ARG emits the value directly into a core register.  EXT_ARG is the
 * statement used on the stack path to funnel the value through TCG_REG_TMP
 * before it is stored into the outgoing argument area (empty for plain
 * 32-bit register moves).  */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);                      \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
    (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
    (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
1150static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
1151 TCGReg arglo, TCGReg arghi)
9716ef3b
PM
1152{
1153 /* 64 bit arguments must go in even/odd register pairs
1154 * and in 8-aligned stack slots.
1155 */
1156 if (argreg & 1) {
1157 argreg++;
1158 }
1159 argreg = tcg_out_arg_reg32(s, argreg, arglo);
1160 argreg = tcg_out_arg_reg32(s, argreg, arghi);
1161 return argreg;
1162}
fc4d60ee 1163#endif /* SOFTMMU */
811d4cf4 1164
3979144c
PB
1165#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
1166
/* Emit code for a guest memory load.  'opc' encodes log2 of the access
 * size in bits 0..1 and sign-extension in bit 2.  'args' supplies the
 * destination register (pair for 64-bit data), the guest address register
 * (pair for 64-bit guest addresses under softmmu), and the mmu index.
 * Under CONFIG_SOFTMMU this emits an inline TLB lookup with a conditional
 * fast path and a call to the slow-path helper; otherwise it emits a
 * direct (GUEST_BASE-offset) access.
 */
static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits, tlb_offset;
    TCGReg argreg;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
# if CPU_TLB_BITS > 8
#  error
# endif
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
                    0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
                    TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* We assume that the offset is contained within 20 bits.  */
    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    assert((tlb_offset & ~0xfffff) == 0);
    if (tlb_offset > 0xfff) {
        /* Fold the high 8 bits of the offset into an ADD with a rotated
           immediate (0xa00 selects the rotation), leaving <= 12 bits.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        0xa00 | (tlb_offset >> 12));
        tlb_offset &= 0xfff;
    }
    /* Load the TLB comparator; writeback leaves R0 pointing at the entry.  */
    tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load in the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    /* Load the addend for the fast path; everything below is predicated
       on EQ, i.e. on the TLB comparison having matched.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_read));

    switch (opc) {
    case 0:
        tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 0 | 4:
        tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap16(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            /* Load unsigned, byteswap, then sign-extend in one helper.  */
            tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
            tcg_out_bswap16s(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        }
        break;
    case 3:
        if (bswap) {
            /* Big-endian guest: low word lands in data_reg2.  */
            tcg_out_ld32_rwb(s, COND_EQ, data_reg2, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg, TCG_REG_R1, 4);
            tcg_out_bswap32(s, COND_EQ, data_reg2, data_reg2);
            tcg_out_bswap32(s, COND_EQ, data_reg, data_reg);
        } else {
            tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Fast path done: skip the slow path (branch patched below).  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Note that this code relies on the constraints we set in arm_op_defs[]
     * to ensure that later arguments are not passed to us in registers we
     * trash by moving the earlier arguments into them.
     */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
#if TARGET_LONG_BITS == 64
    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
#endif
    argreg = tcg_out_arg_imm32(s, argreg, mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[s_bits]);

    /* The helper returns the value in R0 (R0:R1 for 64-bit); extend or
       move it into the destination register(s).  */
    switch (opc) {
    case 0 | 4:
        tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        break;
    case 3:
        tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
        tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
        break;
    }

    /* Patch the fast-path branch to land here, after the slow path.  */
    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8 bits (suitably rotated) at a
           time, since ARM immediates are an 8-bit value with rotation.  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_TMP;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 1 | 4:
        if (bswap) {
            tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
        } else {
            tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
        }
        break;
    case 3:
        /* TODO: use block load -
         * check that data_reg2 > data_reg or the other way */
        if (data_reg == addr_reg) {
            /* Load the word that clobbers the address register last.  */
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
            tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
        }
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
            tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
        }
        break;
    }
#endif
}
1377
/* Emit code for a guest memory store.  'opc' is log2 of the access size
 * (0..3).  'args' supplies the data register (pair for 64-bit data), the
 * guest address register (pair for 64-bit guest addresses under softmmu),
 * and the mmu index.  Mirrors tcg_out_qemu_ld: inline TLB fast path plus
 * slow-path helper call under CONFIG_SOFTMMU, direct access otherwise.
 */
static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, bswap;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits, tlb_offset;
    TCGReg argreg;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    uint32_t *label_ptr;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    TCG_REG_TMP, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
                    TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* We assume that the offset is contained within 20 bits.  */
    tlb_offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    assert((tlb_offset & ~0xfffff) == 0);
    if (tlb_offset > 0xfff) {
        /* Fold the high 8 bits into an ADD with a rotated immediate
           (0xa00 selects the rotation), leaving <= 12 bits.  */
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
                        0xa00 | (tlb_offset >> 12));
        tlb_offset &= 0xfff;
    }
    /* Load the TLB comparator; writeback leaves R0 pointing at the entry.  */
    tcg_out_ld32_12wb(s, COND_AL, TCG_REG_R1, TCG_REG_R0, tlb_offset);
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
                    TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment.  */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load in the first access.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0, 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
                    TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    /* Load the addend for the fast path; everything below is predicated
       on EQ, i.e. on the TLB comparison having matched.  */
    tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
                    offsetof(CPUTLBEntry, addend)
                    - offsetof(CPUTLBEntry, addr_write));

    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        break;
    case 1:
        if (bswap) {
            /* Byteswap into R0 so the source register is preserved.  */
            tcg_out_bswap16st(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, TCG_REG_R1);
        } else {
            tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
        }
        break;
    case 3:
        if (bswap) {
            /* Big-endian guest: swap and store high word first.  */
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
            tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, addr_reg);
            tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, TCG_REG_R1, 4);
        } else {
            tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
            tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
        }
        break;
    }

    /* Fast path done: skip the slow path (branch patched below).  */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b_noaddr(s, COND_EQ);

    /* TODO: move this code to where the constants pool will be */
    /* Note that this code relies on the constraints we set in arm_op_defs[]
     * to ensure that later arguments are not passed to us in registers we
     * trash by moving the earlier arguments into them.
     */
    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
#if TARGET_LONG_BITS == 64
    argreg = tcg_out_arg_reg64(s, argreg, addr_reg, addr_reg2);
#else
    argreg = tcg_out_arg_reg32(s, argreg, addr_reg);
#endif

    /* Marshal the store value, zero-extending sub-word data.  */
    switch (opc) {
    case 0:
        argreg = tcg_out_arg_reg8(s, argreg, data_reg);
        break;
    case 1:
        argreg = tcg_out_arg_reg16(s, argreg, data_reg);
        break;
    case 2:
        argreg = tcg_out_arg_reg32(s, argreg, data_reg);
        break;
    case 3:
        argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, mem_index);
    tcg_out_call(s, (tcg_target_long) qemu_st_helpers[s_bits]);

    /* Patch the fast-path branch to land here, after the slow path.  */
    reloc_pc24(label_ptr, (tcg_target_long)s->code_ptr);
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to the address, 8 rotated bits at a time.  R1 is
           used as scratch here (R0 may hold byteswapped data below).  */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = TCG_REG_R1;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 2:
    default:
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        }
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        if (bswap) {
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
        } else {
            tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
            tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        }
        break;
    }
#endif
}
1566
811d4cf4
AZ
/* Address in the generated prologue that a TB returns to; the target of
   exit_tb.  Set elsewhere in this file (not visible in this chunk).  */
static uint8_t *tb_ret_addr;

/* Central dispatcher: emit ARM code for one TCG opcode.  'args' holds the
 * operands; const_args[i] is nonzero when args[i] is a constant rather
 * than a register.  Operand constraints are declared in arm_op_defs[].
 */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                const TCGArg *args, const int *const_args)
{
    TCGArg a0, a1, a2, a3, a4, a5;
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        {
            uint8_t *ld_ptr = s->code_ptr;
            /* Small return values fit a MOV immediate; otherwise load R0
               from a literal word emitted after the goto, back-patching
               the load's offset once its position is known.  */
            if (args[0] >> 8)
                tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            else
                tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
            tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
            if (args[0] >> 8) {
                /* Patch the load's 8-bit offset field; -8 for the pipeline.  */
                *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
                tcg_out32(s, args[0]);
            }
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method */
#if defined(USE_DIRECT_JUMP)
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out_b_noaddr(s, COND_AL);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
#endif
        } else {
            /* Indirect jump method */
#if 1
            /* Load PC from the tb_next slot; use a PC-relative load when
               the slot is within the 12-bit offset range.  */
            c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
            if (c > 0xfff || c < -0xfff) {
                tcg_out_movi32(s, COND_AL, TCG_REG_R0,
                                (tcg_target_long) (s->tb_next + args[0]));
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            } else
                tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
#else
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
            tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
            tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
#endif
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0])
            tcg_out_call(s, args[0]);
        else
            tcg_out_callr(s, COND_AL, args[0]);
        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, args[0]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
        tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
        tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
        tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
        break;

    case INDEX_op_mov_i32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                        args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi32(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_movcond_i32:
        /* Constraints mean that v2 is always in the same register as dest,
         * so we only need to do "if condition passed, move v1 to dest".
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
                        ARITH_MVN, args[0], 0, args[3], const_args[3]);
        break;
    case INDEX_op_add_i32:
        /* rIN: a negative constant operand flips ADD to SUB.  */
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
            } else {
                /* C - reg: reverse-subtract.  */
                tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
                               args[0], args[2], args[1], 1);
            }
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
                            args[0], args[1], args[2], const_args[2]);
        }
        break;
    case INDEX_op_and_i32:
        /* rIK: an inverted constant operand flips AND to BIC.  */
        tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_andc_i32:
        tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
                        args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_or_i32:
        c = ARITH_ORR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_EOR;
        /* Fall through. */
    gen_arith:
        tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_add2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Avoid clobbering inputs still needed by the high-part op.  */
        if (a0 == a3 || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
                        a0, a2, a4, const_args[4]);
        tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
                        a1, a3, a5, const_args[5]);
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_sub2_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        a3 = args[3], a4 = args[4], a5 = args[5];
        /* Avoid clobbering inputs still needed by the high-part op.  */
        if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
            a0 = TCG_REG_TMP;
        }
        if (const_args[2]) {
            if (const_args[4]) {
                tcg_out_movi32(s, COND_AL, a0, a4);
                a4 = a0;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
        } else {
            tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
                            ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
        }
        if (const_args[3]) {
            if (const_args[5]) {
                tcg_out_movi32(s, COND_AL, a1, a5);
                a5 = a1;
            }
            tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
        } else {
            tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
                            a1, a3, a5, const_args[5]);
        }
        tcg_out_mov_reg(s, COND_AL, args[0], a0);
        break;
    case INDEX_op_neg_i32:
        tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
        break;
    case INDEX_op_not_i32:
        tcg_out_dat_reg(s, COND_AL,
                        ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_mul_i32:
        tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    case INDEX_op_muls2_i32:
        tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
        break;
    /* XXX: Perhaps args[2] & 0x1f is wrong */
    case INDEX_op_shl_i32:
        c = const_args[2] ?
                SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
        goto gen_shift32;
    case INDEX_op_shr_i32:
        /* A zero shift count must encode as LSL #0 (LSR #0 means #32).  */
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
        goto gen_shift32;
    case INDEX_op_sar_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
        goto gen_shift32;
    case INDEX_op_rotr_i32:
        c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
                SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
        /* Fall through. */
    gen_shift32:
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
        break;

    case INDEX_op_rotl_i32:
        /* No rotate-left on ARM: rotate right by (32 - count).  */
        if (const_args[2]) {
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            ((0x20 - args[2]) & 0x1f) ?
                            SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
                            SHIFT_IMM_LSL(0));
        } else {
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
            tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
                            SHIFT_REG_ROR(TCG_REG_TMP));
        }
        break;

    case INDEX_op_brcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                       args[0], args[1], const_args[1]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        /* The resulting conditions are:
         * TCG_COND_EQ    -->  a0 == a2 && a1 == a3,
         * TCG_COND_NE    --> (a0 != a2 && a1 == a3) ||  a1 != a3,
         * TCG_COND_LT(U) --> (a0 <  a2 && a1 == a3) ||  a1 <  a3,
         * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
         * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
         * TCG_COND_GT(U) --> (a0 >  a2 && a1 == a3) ||  a1 >  a3,
         */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[0], args[2], const_args[2]);
        tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[2], const_args[2]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
                        ARITH_MOV, args[0], 0, 0);
        break;
    case INDEX_op_setcond2_i32:
        /* See brcond2_i32 comment */
        tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
                        args[2], args[4], const_args[4]);
        tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
                        args[1], args[3], const_args[3]);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
                        ARITH_MOV, args[0], 0, 1);
        tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
                        ARITH_MOV, args[0], 0, 0);
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, COND_AL, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tcg_out_ext16u(s, COND_AL, args[0], args[1]);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_deposit(s, COND_AL, args[0], args[2],
                        args[3], args[4], const_args[2]);
        break;

    case INDEX_op_div_i32:
        tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_divu_i32:
        tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
        break;
    case INDEX_op_rem_i32:
        /* rem = a - (a / b) * b, via the divide instruction.  */
        tcg_out_sdiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
        tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
                        SHIFT_IMM_LSL(0));
        break;
    case INDEX_op_remu_i32:
        tcg_out_udiv(s, COND_AL, TCG_REG_TMP, args[1], args[2]);
        tcg_out_mul32(s, COND_AL, TCG_REG_TMP, TCG_REG_TMP, args[2]);
        tcg_out_dat_reg(s, COND_AL, ARITH_SUB, args[0], args[1], TCG_REG_TMP,
                        SHIFT_IMM_LSL(0));
        break;

    default:
        tcg_abort();
    }
}
1911
/* Operand-constraint table for the ARM backend: one entry per supported
 * TCG opcode, listing a constraint string per operand (outputs first).
 * "r" is any core register; lowercase suffixes like "I", "K", "N", "Z"
 * select immediate-operand alternatives (presumably ARM-encodable
 * immediates and variants thereof — see the backend's constraint parser
 * to confirm each letter's exact meaning).  The qemu_ld/st entries use
 * "l"/"L"/"s"/"S" register classes, which appear to reserve registers
 * needed by the slow-path helper calls — verify against
 * target_parse_constraint.  The table is terminated by { -1 }. */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rIN" } },
    { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "rIK" } },
    { INDEX_op_andc_i32, { "r", "r", "rIK" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "rIN" } },
    { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
    { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
    { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },

    /* With a 32-bit guest the address is a single operand; with a
     * 64-bit guest it occupies two registers, hence the extra "l"/"s"
     * constraint in the #else branch below. */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l" } },

    { INDEX_op_qemu_st8, { "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
    { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
    { INDEX_op_qemu_ld32, { "r", "l", "l" } },
    { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },

    { INDEX_op_qemu_st8, { "s", "s", "s" } },
    { INDEX_op_qemu_st16, { "s", "s", "s" } },
    { INDEX_op_qemu_st32, { "s", "s", "s" } },
    { INDEX_op_qemu_st64, { "S", "S", "s", "s" } },
#endif

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext16u_i32, { "r", "r" } },

    { INDEX_op_deposit_i32, { "r", "0", "rZ" } },

    /* Hardware divide is only advertised when the build enables it. */
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_rem_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_remu_i32, { "r", "r", "r" } },
#endif

    { -1 },
};
2002
/* One-time initialization of the ARM TCG backend: declare which
 * registers the register allocator may use, which are clobbered by
 * calls, which are reserved, and register the opcode constraint table. */
static void tcg_target_init(TCGContext *s)
{
#if !defined(CONFIG_USER_ONLY)
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();
#endif

    /* All 16 core registers (r0-r15) are nominally available. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
    /* Per the AAPCS, r0-r3, r12 (ip) and r14 (lr) are caller-saved —
     * anything live across a call must avoid these. */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
                     (1 << TCG_REG_R1) |
                     (1 << TCG_REG_R2) |
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R12) |
                     (1 << TCG_REG_R14));

    /* Keep the stack pointer, the backend scratch register and the
     * program counter out of the allocator's hands entirely. */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);

    tcg_add_target_add_op_defs(arm_op_defs);
}
2027
2a534aff
RH
2028static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2029 TCGReg arg1, tcg_target_long arg2)
811d4cf4
AZ
2030{
2031 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2032}
2033
2a534aff
RH
2034static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2035 TCGReg arg1, tcg_target_long arg2)
811d4cf4
AZ
2036{
2037 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2038}
2039
2a534aff
RH
2040static inline void tcg_out_mov(TCGContext *s, TCGType type,
2041 TCGReg ret, TCGReg arg)
811d4cf4
AZ
2042{
2043 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
2044}
2045
/* Generic load-immediate hook: materialize the 32-bit constant arg in
 * ret; tcg_out_movi32 picks the cheapest encoding sequence. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
2051
/* Emit the prologue/epilogue pair that brackets all generated TBs:
 * save the callee-saved registers, carve out the TCG stack frame,
 * load the CPU env pointer, and jump to the translated code.  The
 * epilogue address is recorded in tb_ret_addr so TBs can return here. */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size;

    /* Calling convention requires us to save r4-r11 and lr. */
    /* stmdb sp!, { r4 - r11, lr } */
    tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);

    /* Allocate the local stack frame. */
    frame_size = TCG_STATIC_CALL_ARGS_SIZE;
    frame_size += CPU_TEMP_BUF_NLONGS * sizeof(long);
    /* We saved an odd number of registers above; keep an 8 aligned stack. */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN) + 4;

    /* sp -= frame_size; then tell TCG where its spill area lives. */
    tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, frame_size, 1);
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

    /* First C argument is the CPU env pointer; park it in AREG0. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Second C argument is the TB entry point; branch to it. */
    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
    tb_ret_addr = s->code_ptr;

    /* Epilogue.  We branch here via tb_ret_addr. */
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, frame_size, 1);

    /* ldmia sp!, { r4 - r11, pc } */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
}