[mirror_qemu.git] / tcg / arm / tcg-target.c
tcg/arm: sxtb and sxth are available starting with ARMv6
811d4cf4
AZ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
d4a9eb1f 24
ac34fb5c
AJ
25#if defined(__ARM_ARCH_7__) || \
26 defined(__ARM_ARCH_7A__) || \
27 defined(__ARM_ARCH_7EM__) || \
28 defined(__ARM_ARCH_7M__) || \
29 defined(__ARM_ARCH_7R__)
30#define USE_ARMV7_INSTRUCTIONS
31#endif
32
33#if defined(USE_ARMV7_INSTRUCTIONS) || \
34 defined(__ARM_ARCH_6J__) || \
35 defined(__ARM_ARCH_6K__) || \
36 defined(__ARM_ARCH_6T2__) || \
37 defined(__ARM_ARCH_6Z__) || \
38 defined(__ARM_ARCH_6ZK__)
39#define USE_ARMV6_INSTRUCTIONS
40#endif
41
42#if defined(USE_ARMV6_INSTRUCTIONS) || \
43 defined(__ARM_ARCH_5T__) || \
44 defined(__ARM_ARCH_5TE__) || \
45 defined(__ARM_ARCH_5TEJ__)
46#define USE_ARMV5_INSTRUCTIONS
47#endif
48
49#ifdef USE_ARMV5_INSTRUCTIONS
50static const int use_armv5_instructions = 1;
51#else
52static const int use_armv5_instructions = 0;
53#endif
54#undef USE_ARMV5_INSTRUCTIONS
55
56#ifdef USE_ARMV6_INSTRUCTIONS
57static const int use_armv6_instructions = 1;
58#else
59static const int use_armv6_instructions = 0;
60#endif
61#undef USE_ARMV6_INSTRUCTIONS
62
63#ifdef USE_ARMV7_INSTRUCTIONS
64static const int use_armv7_instructions = 1;
65#else
66static const int use_armv7_instructions = 0;
67#endif
68#undef USE_ARMV7_INSTRUCTIONS
69
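/* Note (added explanation): the __ARM_ARCH_* macros tested above are
 * predefined by the compiler from the -march/-mcpu the file is built for,
 * so the instruction-set level used by the code generator is fixed at
 * build time rather than probed at run time. */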
d4a9eb1f
BS
70#ifndef NDEBUG
71static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
811d4cf4
AZ
72 "%r0",
73 "%r1",
74 "%r2",
75 "%r3",
76 "%r4",
77 "%r5",
78 "%r6",
79 "%r7",
80 "%r8",
81 "%r9",
82 "%r10",
83 "%r11",
84 "%r12",
85 "%r13",
86 "%r14",
e4a7d5e8 87 "%pc",
811d4cf4 88};
d4a9eb1f 89#endif
811d4cf4 90
d4a9eb1f 91static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
92 TCG_REG_R0,
93 TCG_REG_R1,
94 TCG_REG_R2,
95 TCG_REG_R3,
96 TCG_REG_R4,
97 TCG_REG_R5,
98 TCG_REG_R6,
99 TCG_REG_R7,
100 TCG_REG_R8,
101 TCG_REG_R9,
102 TCG_REG_R10,
103 TCG_REG_R11,
104 TCG_REG_R12,
105 TCG_REG_R13,
106 TCG_REG_R14,
107};
108
d4a9eb1f 109static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
110 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
111};
d4a9eb1f 112static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
113 TCG_REG_R0, TCG_REG_R1
114};
115
650bbb36 116static void patch_reloc(uint8_t *code_ptr, int type,
811d4cf4
AZ
117 tcg_target_long value, tcg_target_long addend)
118{
119 switch (type) {
120 case R_ARM_ABS32:
121 *(uint32_t *) code_ptr = value;
122 break;
123
124 case R_ARM_CALL:
125 case R_ARM_JUMP24:
126 default:
127 tcg_abort();
128
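    /* R_ARM_PC24 patches the 24-bit, word-scaled displacement of a B/BL
     * instruction.  The displacement is relative to the PC, which on ARM
     * reads as the address of the branch plus 8. */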
129 case R_ARM_PC24:
eae6ce52 130 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
e936243a 131 (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
811d4cf4
AZ
132 break;
133 }
134}
135
136/* maximum number of registers used for input function arguments */
137static inline int tcg_target_get_call_iarg_regs_count(int flags)
138{
139 return 4;
140}
141
811d4cf4 142/* parse target specific constraints */
d4a9eb1f 143static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
811d4cf4
AZ
144{
145 const char *ct_str;
146
147 ct_str = *pct_str;
148 switch (ct_str[0]) {
cb4e581f
LD
149 case 'I':
150 ct->ct |= TCG_CT_CONST_ARM;
151 break;
152
811d4cf4
AZ
153 case 'r':
154#ifndef CONFIG_SOFTMMU
155 case 'd':
156 case 'D':
157 case 'x':
158 case 'X':
159#endif
160 ct->ct |= TCG_CT_REG;
161 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
162 break;
163
164#ifdef CONFIG_SOFTMMU
d0660ed4 165 /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
811d4cf4
AZ
166 case 'x':
167 ct->ct |= TCG_CT_REG;
168 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
811d4cf4
AZ
169 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
170 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
811d4cf4
AZ
171 break;
172
d0660ed4
AZ
173 /* qemu_ld64 data_reg */
174 case 'd':
175 ct->ct |= TCG_CT_REG;
176 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
177 /* r1 is still needed to load data_reg2, so don't use it. */
178 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
179 break;
180
811d4cf4
AZ
181 /* qemu_ld/st64 data_reg2 */
182 case 'D':
183 ct->ct |= TCG_CT_REG;
184 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
185 /* r0, r1 and optionally r2 will be overwritten by the address
186 * and the low word of data, so don't use these. */
187 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
188 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
189# if TARGET_LONG_BITS == 64
190 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
191# endif
192 break;
193
194# if TARGET_LONG_BITS == 64
195 /* qemu_ld/st addr_reg2 */
196 case 'X':
197 ct->ct |= TCG_CT_REG;
198 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
199 /* r0 will be overwritten by the low word of base, so don't use it. */
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
811d4cf4 201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
811d4cf4
AZ
202 break;
203# endif
204#endif
205
811d4cf4
AZ
206 default:
207 return -1;
208 }
209 ct_str++;
210 *pct_str = ct_str;
211
212 return 0;
213}
214
94953e6d
LD
215static inline uint32_t rotl(uint32_t val, int n)
216{
217 return (val << n) | (val >> (32 - n));
218}
219
220/* ARM immediates for ALU instructions are made of an unsigned 8-bit value
221 right-rotated by an even amount between 0 and 30. */
222static inline int encode_imm(uint32_t imm)
223{
4e6f6d4c
LD
224 int shift;
225
94953e6d
LD
226 /* simple case, only lower bits */
227 if ((imm & ~0xff) == 0)
228 return 0;
229 /* then try a simple even shift */
230 shift = ctz32(imm) & ~1;
231 if (((imm >> shift) & ~0xff) == 0)
232 return 32 - shift;
233 /* now try harder with rotations */
234 if ((rotl(imm, 2) & ~0xff) == 0)
235 return 2;
236 if ((rotl(imm, 4) & ~0xff) == 0)
237 return 4;
238 if ((rotl(imm, 6) & ~0xff) == 0)
239 return 6;
240 /* imm can't be encoded */
241 return -1;
242}
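/* Worked example (illustrative only): encode_imm(0x3fc00) computes shift = 10,
 * sees (0x3fc00 >> 10) == 0xff, and returns 22.  A caller then emits
 * rotl(0x3fc00, 22) | (22 << 7) == 0xbff, i.e. the 8-bit value 0xff with a
 * rotate-right-by-22 field, which the ALU expands back into 0x3fc00. */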
cb4e581f
LD
243
244static inline int check_fit_imm(uint32_t imm)
245{
94953e6d 246 return encode_imm(imm) >= 0;
cb4e581f
LD
247}
248
811d4cf4
AZ
249/* Test if a constant matches the constraint.
250 * TODO: define constraints for:
251 *
252 * ldr/str offset: between -0xfff and 0xfff
253 * ldrh/strh offset: between -0xff and 0xff
254 * mov operand2: values represented with x << (2 * y), x < 0x100
255 * add, sub, eor...: ditto
256 */
257static inline int tcg_target_const_match(tcg_target_long val,
258 const TCGArgConstraint *arg_ct)
259{
260 int ct;
261 ct = arg_ct->ct;
262 if (ct & TCG_CT_CONST)
263 return 1;
cb4e581f
LD
264 else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
265 return 1;
811d4cf4
AZ
266 else
267 return 0;
268}
269
270enum arm_data_opc_e {
271 ARITH_AND = 0x0,
272 ARITH_EOR = 0x1,
273 ARITH_SUB = 0x2,
274 ARITH_RSB = 0x3,
275 ARITH_ADD = 0x4,
276 ARITH_ADC = 0x5,
277 ARITH_SBC = 0x6,
278 ARITH_RSC = 0x7,
3979144c 279 ARITH_TST = 0x8,
811d4cf4
AZ
280 ARITH_CMP = 0xa,
281 ARITH_CMN = 0xb,
282 ARITH_ORR = 0xc,
283 ARITH_MOV = 0xd,
284 ARITH_BIC = 0xe,
285 ARITH_MVN = 0xf,
286};
287
3979144c
PB
288#define TO_CPSR(opc) \
289 ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
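/* Bit 20 is the S ("set condition codes") bit.  CMP, CMN and TST have no
 * destination register and exist only for their flag results, so TO_CPSR()
 * forces S on for them and leaves it clear for every other opcode. */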
811d4cf4
AZ
290
291#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
292#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
293#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
294#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
295#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
296#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
297#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
298#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
299
300enum arm_cond_code_e {
301 COND_EQ = 0x0,
302 COND_NE = 0x1,
303 COND_CS = 0x2, /* Unsigned greater or equal */
304 COND_CC = 0x3, /* Unsigned less than */
305 COND_MI = 0x4, /* Negative */
306 COND_PL = 0x5, /* Zero or greater */
307 COND_VS = 0x6, /* Overflow */
308 COND_VC = 0x7, /* No overflow */
309 COND_HI = 0x8, /* Unsigned greater than */
310 COND_LS = 0x9, /* Unsigned less or equal */
311 COND_GE = 0xa,
312 COND_LT = 0xb,
313 COND_GT = 0xc,
314 COND_LE = 0xd,
315 COND_AL = 0xe,
316};
317
318static const uint8_t tcg_cond_to_arm_cond[10] = {
319 [TCG_COND_EQ] = COND_EQ,
320 [TCG_COND_NE] = COND_NE,
321 [TCG_COND_LT] = COND_LT,
322 [TCG_COND_GE] = COND_GE,
323 [TCG_COND_LE] = COND_LE,
324 [TCG_COND_GT] = COND_GT,
325 /* unsigned */
326 [TCG_COND_LTU] = COND_CC,
327 [TCG_COND_GEU] = COND_CS,
328 [TCG_COND_LEU] = COND_LS,
329 [TCG_COND_GTU] = COND_HI,
330};
331
332static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
333{
334 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
335}
336
337static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
338{
339 tcg_out32(s, (cond << 28) | 0x0a000000 |
340 (((offset - 8) >> 2) & 0x00ffffff));
341}
342
e936243a
AZ
343static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
344{
e2542fe2 345#ifdef HOST_WORDS_BIGENDIAN
e936243a
AZ
346 tcg_out8(s, (cond << 4) | 0x0a);
347 s->code_ptr += 3;
348#else
349 s->code_ptr += 3;
350 tcg_out8(s, (cond << 4) | 0x0a);
351#endif
352}
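/* tcg_out_b_noaddr() emits only the condition/opcode byte of a branch and
 * reserves the remaining three bytes: the 24-bit displacement is filled in
 * later by patch_reloc() once the R_ARM_PC24 target is known (see
 * tcg_out_goto_label()).  Which byte holds the opcode depends on host
 * endianness, hence the two variants. */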
353
811d4cf4
AZ
354static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
355{
356 tcg_out32(s, (cond << 28) | 0x0b000000 |
357 (((offset - 8) >> 2) & 0x00ffffff));
358}
359
360static inline void tcg_out_dat_reg(TCGContext *s,
361 int cond, int opc, int rd, int rn, int rm, int shift)
362{
363 tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
364 (rn << 16) | (rd << 12) | shift | rm);
365}
366
367static inline void tcg_out_dat_reg2(TCGContext *s,
368 int cond, int opc0, int opc1, int rd0, int rd1,
369 int rn0, int rn1, int rm0, int rm1, int shift)
370{
0c9c3a9e
AZ
371 if (rd0 == rn1 || rd0 == rm1) {
372 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
373 (rn0 << 16) | (8 << 12) | shift | rm0);
374 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
375 (rn1 << 16) | (rd1 << 12) | shift | rm1);
376 tcg_out_dat_reg(s, cond, ARITH_MOV,
377 rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
378 } else {
379 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
380 (rn0 << 16) | (rd0 << 12) | shift | rm0);
381 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
382 (rn1 << 16) | (rd1 << 12) | shift | rm1);
383 }
811d4cf4
AZ
384}
385
386static inline void tcg_out_dat_imm(TCGContext *s,
387 int cond, int opc, int rd, int rn, int im)
388{
3979144c 389 tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
811d4cf4
AZ
390 (rn << 16) | (rd << 12) | im);
391}
392
393static inline void tcg_out_movi32(TCGContext *s,
394 int cond, int rd, int32_t arg)
395{
396 int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
397
398 /* TODO: This is very suboptimal, we can easily have a constant
399 * pool somewhere after all the instructions. */
400
401 if (arg < 0 && arg > -0x100)
402 return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
403
404 if (offset < 0x100 && offset > -0x100)
405 return offset >= 0 ?
406 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
407 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
408
ac34fb5c
AJ
409 if (use_armv7_instructions) {
410 /* use movw/movt */
411 /* movw */
412 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
413 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
414 if (arg & 0xffff0000)
415 /* movt */
416 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
417 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
418 } else {
419 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
420 if (arg & 0x0000ff00)
421 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
422 ((arg >> 8) & 0xff) | 0xc00);
423 if (arg & 0x00ff0000)
424 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
425 ((arg >> 16) & 0xff) | 0x800);
426 if (arg & 0xff000000)
427 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
428 ((arg >> 24) & 0xff) | 0x400);
429 }
811d4cf4
AZ
430}
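/* Illustrative example: on a pre-ARMv7 host, tcg_out_movi32() loads
 * 0x12345678 through the fallback path above as
 *     mov  rd, #0x78
 *     orr  rd, rd, #0x5600
 *     orr  rd, rd, #0x340000
 *     orr  rd, rd, #0x12000000
 * whereas an ARMv7 host gets a single movw/movt pair. */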
431
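/* MUL with rd == rm is unpredictable on pre-ARMv6 cores, so the helpers
 * below swap the operands when that is enough and otherwise multiply into
 * the r8 scratch register and move the result afterwards. */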
432static inline void tcg_out_mul32(TCGContext *s,
433 int cond, int rd, int rs, int rm)
434{
435 if (rd != rm)
436 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
437 (rs << 8) | 0x90 | rm);
438 else if (rd != rs)
439 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
440 (rm << 8) | 0x90 | rs);
441 else {
442 tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
443 (rs << 8) | 0x90 | rm);
444 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 445 rd, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
811d4cf4
AZ
446 }
447}
448
449static inline void tcg_out_umull32(TCGContext *s,
450 int cond, int rd0, int rd1, int rs, int rm)
451{
452 if (rd0 != rm && rd1 != rm)
453 tcg_out32(s, (cond << 28) | 0x800090 |
454 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
455 else if (rd0 != rs && rd1 != rs)
456 tcg_out32(s, (cond << 28) | 0x800090 |
457 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
458 else {
459 tcg_out_dat_reg(s, cond, ARITH_MOV,
460 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
461 tcg_out32(s, (cond << 28) | 0x800098 |
462 (rd1 << 16) | (rd0 << 12) | (rs << 8));
463 }
464}
465
466static inline void tcg_out_smull32(TCGContext *s,
467 int cond, int rd0, int rd1, int rs, int rm)
468{
469 if (rd0 != rm && rd1 != rm)
470 tcg_out32(s, (cond << 28) | 0xc00090 |
471 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
472 else if (rd0 != rs && rd1 != rs)
473 tcg_out32(s, (cond << 28) | 0xc00090 |
474 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
475 else {
476 tcg_out_dat_reg(s, cond, ARITH_MOV,
477 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
478 tcg_out32(s, (cond << 28) | 0xc00098 |
479 (rd1 << 16) | (rd0 << 12) | (rs << 8));
480 }
481}
482
483static inline void tcg_out_ld32_12(TCGContext *s, int cond,
484 int rd, int rn, tcg_target_long im)
485{
486 if (im >= 0)
487 tcg_out32(s, (cond << 28) | 0x05900000 |
488 (rn << 16) | (rd << 12) | (im & 0xfff));
489 else
490 tcg_out32(s, (cond << 28) | 0x05100000 |
491 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
492}
493
494static inline void tcg_out_st32_12(TCGContext *s, int cond,
495 int rd, int rn, tcg_target_long im)
496{
497 if (im >= 0)
498 tcg_out32(s, (cond << 28) | 0x05800000 |
499 (rn << 16) | (rd << 12) | (im & 0xfff));
500 else
501 tcg_out32(s, (cond << 28) | 0x05000000 |
502 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
503}
504
505static inline void tcg_out_ld32_r(TCGContext *s, int cond,
506 int rd, int rn, int rm)
507{
508 tcg_out32(s, (cond << 28) | 0x07900000 |
509 (rn << 16) | (rd << 12) | rm);
510}
511
512static inline void tcg_out_st32_r(TCGContext *s, int cond,
513 int rd, int rn, int rm)
514{
515 tcg_out32(s, (cond << 28) | 0x07800000 |
516 (rn << 16) | (rd << 12) | rm);
517}
518
3979144c
PB
519/* Register pre-increment with base writeback. */
520static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
521 int rd, int rn, int rm)
522{
523 tcg_out32(s, (cond << 28) | 0x07b00000 |
524 (rn << 16) | (rd << 12) | rm);
525}
526
527static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
528 int rd, int rn, int rm)
529{
530 tcg_out32(s, (cond << 28) | 0x07a00000 |
531 (rn << 16) | (rd << 12) | rm);
532}
533
811d4cf4
AZ
534static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
535 int rd, int rn, tcg_target_long im)
536{
537 if (im >= 0)
538 tcg_out32(s, (cond << 28) | 0x01d000b0 |
539 (rn << 16) | (rd << 12) |
540 ((im & 0xf0) << 4) | (im & 0xf));
541 else
542 tcg_out32(s, (cond << 28) | 0x015000b0 |
543 (rn << 16) | (rd << 12) |
544 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
545}
546
f694a27e 547static inline void tcg_out_st16_8(TCGContext *s, int cond,
811d4cf4
AZ
548 int rd, int rn, tcg_target_long im)
549{
550 if (im >= 0)
551 tcg_out32(s, (cond << 28) | 0x01c000b0 |
552 (rn << 16) | (rd << 12) |
553 ((im & 0xf0) << 4) | (im & 0xf));
554 else
555 tcg_out32(s, (cond << 28) | 0x014000b0 |
556 (rn << 16) | (rd << 12) |
557 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
558}
559
560static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
561 int rd, int rn, int rm)
562{
563 tcg_out32(s, (cond << 28) | 0x019000b0 |
564 (rn << 16) | (rd << 12) | rm);
565}
566
f694a27e 567static inline void tcg_out_st16_r(TCGContext *s, int cond,
811d4cf4
AZ
568 int rd, int rn, int rm)
569{
570 tcg_out32(s, (cond << 28) | 0x018000b0 |
571 (rn << 16) | (rd << 12) | rm);
572}
573
574static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
575 int rd, int rn, tcg_target_long im)
576{
577 if (im >= 0)
578 tcg_out32(s, (cond << 28) | 0x01d000f0 |
579 (rn << 16) | (rd << 12) |
580 ((im & 0xf0) << 4) | (im & 0xf));
581 else
582 tcg_out32(s, (cond << 28) | 0x015000f0 |
583 (rn << 16) | (rd << 12) |
584 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
585}
586
811d4cf4
AZ
587static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
588 int rd, int rn, int rm)
589{
590 tcg_out32(s, (cond << 28) | 0x019000f0 |
591 (rn << 16) | (rd << 12) | rm);
592}
593
811d4cf4
AZ
594static inline void tcg_out_ld8_12(TCGContext *s, int cond,
595 int rd, int rn, tcg_target_long im)
596{
597 if (im >= 0)
598 tcg_out32(s, (cond << 28) | 0x05d00000 |
599 (rn << 16) | (rd << 12) | (im & 0xfff));
600 else
601 tcg_out32(s, (cond << 28) | 0x05500000 |
602 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
603}
604
605static inline void tcg_out_st8_12(TCGContext *s, int cond,
606 int rd, int rn, tcg_target_long im)
607{
608 if (im >= 0)
609 tcg_out32(s, (cond << 28) | 0x05c00000 |
610 (rn << 16) | (rd << 12) | (im & 0xfff));
611 else
612 tcg_out32(s, (cond << 28) | 0x05400000 |
613 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
614}
615
616static inline void tcg_out_ld8_r(TCGContext *s, int cond,
617 int rd, int rn, int rm)
618{
619 tcg_out32(s, (cond << 28) | 0x07d00000 |
620 (rn << 16) | (rd << 12) | rm);
621}
622
623static inline void tcg_out_st8_r(TCGContext *s, int cond,
624 int rd, int rn, int rm)
625{
626 tcg_out32(s, (cond << 28) | 0x07c00000 |
627 (rn << 16) | (rd << 12) | rm);
628}
629
630static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
631 int rd, int rn, tcg_target_long im)
632{
633 if (im >= 0)
634 tcg_out32(s, (cond << 28) | 0x01d000d0 |
635 (rn << 16) | (rd << 12) |
636 ((im & 0xf0) << 4) | (im & 0xf));
637 else
638 tcg_out32(s, (cond << 28) | 0x015000d0 |
639 (rn << 16) | (rd << 12) |
640 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
641}
642
811d4cf4
AZ
643static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
644 int rd, int rn, int rm)
645{
204c1674 646 tcg_out32(s, (cond << 28) | 0x019000d0 |
811d4cf4
AZ
647 (rn << 16) | (rd << 12) | rm);
648}
649
811d4cf4
AZ
650static inline void tcg_out_ld32u(TCGContext *s, int cond,
651 int rd, int rn, int32_t offset)
652{
653 if (offset > 0xfff || offset < -0xfff) {
654 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
655 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
656 } else
657 tcg_out_ld32_12(s, cond, rd, rn, offset);
658}
659
660static inline void tcg_out_st32(TCGContext *s, int cond,
661 int rd, int rn, int32_t offset)
662{
663 if (offset > 0xfff || offset < -0xfff) {
664 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
665 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
666 } else
667 tcg_out_st32_12(s, cond, rd, rn, offset);
668}
669
670static inline void tcg_out_ld16u(TCGContext *s, int cond,
671 int rd, int rn, int32_t offset)
672{
673 if (offset > 0xff || offset < -0xff) {
674 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
675 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
676 } else
677 tcg_out_ld16u_8(s, cond, rd, rn, offset);
678}
679
680static inline void tcg_out_ld16s(TCGContext *s, int cond,
681 int rd, int rn, int32_t offset)
682{
683 if (offset > 0xff || offset < -0xff) {
684 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
685 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
686 } else
687 tcg_out_ld16s_8(s, cond, rd, rn, offset);
688}
689
f694a27e 690static inline void tcg_out_st16(TCGContext *s, int cond,
811d4cf4
AZ
691 int rd, int rn, int32_t offset)
692{
693 if (offset > 0xff || offset < -0xff) {
694 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
f694a27e 695 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_R8);
811d4cf4 696 } else
f694a27e 697 tcg_out_st16_8(s, cond, rd, rn, offset);
811d4cf4
AZ
698}
699
700static inline void tcg_out_ld8u(TCGContext *s, int cond,
701 int rd, int rn, int32_t offset)
702{
703 if (offset > 0xfff || offset < -0xfff) {
704 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
705 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
706 } else
707 tcg_out_ld8_12(s, cond, rd, rn, offset);
708}
709
710static inline void tcg_out_ld8s(TCGContext *s, int cond,
711 int rd, int rn, int32_t offset)
712{
713 if (offset > 0xff || offset < -0xff) {
714 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
715 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
716 } else
717 tcg_out_ld8s_8(s, cond, rd, rn, offset);
718}
719
f694a27e 720static inline void tcg_out_st8(TCGContext *s, int cond,
811d4cf4
AZ
721 int rd, int rn, int32_t offset)
722{
723 if (offset > 0xfff || offset < -0xfff) {
724 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
725 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
726 } else
727 tcg_out_st8_12(s, cond, rd, rn, offset);
728}
729
730static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
731{
732 int32_t val;
733
734 val = addr - (tcg_target_long) s->code_ptr;
735 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
736 tcg_out_b(s, cond, val);
737 else {
738#if 1
739 tcg_abort();
740#else
741 if (cond == COND_AL) {
c8d80cef 742 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
743 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
744 } else {
745 tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
746 tcg_out_dat_reg(s, cond, ARITH_ADD,
c8d80cef
AJ
747 TCG_REG_PC, TCG_REG_PC,
748 TCG_REG_R8, SHIFT_IMM_LSL(0));
811d4cf4
AZ
749 }
750#endif
751 }
752}
753
754static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
755{
756 int32_t val;
757
811d4cf4
AZ
758 val = addr - (tcg_target_long) s->code_ptr;
759 if (val < 0x01fffffd && val > -0x01fffffd)
760 tcg_out_bl(s, cond, val);
761 else {
762#if 1
763 tcg_abort();
764#else
765 if (cond == COND_AL) {
c8d80cef
AJ
766 tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
767 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
768 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
769 } else {
770 tcg_out_movi32(s, cond, TCG_REG_R9, addr);
c8d80cef
AJ
771 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
772 TCG_REG_PC, SHIFT_IMM_LSL(0));
811d4cf4
AZ
773 tcg_out_bx(s, cond, TCG_REG_R9);
774 }
775#endif
776 }
811d4cf4
AZ
777}
778
779static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
780{
811d4cf4 781 /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
c8d80cef
AJ
782 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
783 TCG_REG_PC, SHIFT_IMM_LSL(0));
811d4cf4 784 tcg_out_bx(s, cond, arg);
811d4cf4
AZ
785}
786
787static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
788{
789 TCGLabel *l = &s->labels[label_index];
790
791 if (l->has_value)
792 tcg_out_goto(s, cond, l->u.value);
793 else if (cond == COND_AL) {
c8d80cef 794 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
795 tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
796 s->code_ptr += 4;
797 } else {
798 /* Probably this should be preferred even for COND_AL... */
799 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
e936243a 800 tcg_out_b_noaddr(s, cond);
811d4cf4
AZ
801 }
802}
803
811d4cf4 804#ifdef CONFIG_SOFTMMU
79383c9c
BS
805
806#include "../../softmmu_defs.h"
811d4cf4
AZ
807
808static void *qemu_ld_helpers[4] = {
809 __ldb_mmu,
810 __ldw_mmu,
811 __ldl_mmu,
812 __ldq_mmu,
813};
814
815static void *qemu_st_helpers[4] = {
816 __stb_mmu,
817 __stw_mmu,
818 __stl_mmu,
819 __stq_mmu,
820};
821#endif
822
3979144c
PB
823#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
824
811d4cf4
AZ
825static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
826 const TCGArg *args, int opc)
827{
828 int addr_reg, data_reg, data_reg2;
829#ifdef CONFIG_SOFTMMU
830 int mem_index, s_bits;
831# if TARGET_LONG_BITS == 64
832 int addr_reg2;
833# endif
811d4cf4 834 uint32_t *label_ptr;
811d4cf4
AZ
835#endif
836
837 data_reg = *args++;
838 if (opc == 3)
839 data_reg2 = *args++;
840 else
d89c682f 841 data_reg2 = 0; /* suppress warning */
811d4cf4 842 addr_reg = *args++;
811d4cf4 843#ifdef CONFIG_SOFTMMU
aef3a282
AZ
844# if TARGET_LONG_BITS == 64
845 addr_reg2 = *args++;
846# endif
811d4cf4
AZ
847 mem_index = *args;
848 s_bits = opc & 3;
849
91a3c1b0 850 /* Should generate something like the following:
3979144c 851 * shr r8, addr_reg, #TARGET_PAGE_BITS
91a3c1b0 852 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
3979144c 853 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
91a3c1b0
AZ
854 */
855# if CPU_TLB_BITS > 8
856# error
857# endif
c8d80cef
AJ
858 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_R8,
859 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
811d4cf4 860 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
c8d80cef
AJ
861 TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
862 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_AREG0,
863 TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
91a3c1b0
AZ
864 /* In the
865 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
866 * below, the offset is likely to exceed the 12-bit immediate range when
867 * mem_index != 0 (and to fit otherwise), so emit an
868 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
869 * first.
870 */
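    /* The immediate operand below encodes mem_index * sizeof(tlb_table[0]),
     * i.e. mem_index << TLB_SHIFT, as a rotated 8-bit constant: the value is
     * mem_index << (TLB_SHIFT & 1) and the rotate field 16 - (TLB_SHIFT >> 1)
     * rotates it left by the remaining even amount. */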
225b4376 871 if (mem_index)
c8d80cef 872 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
225b4376
AZ
873 (mem_index << (TLB_SHIFT & 1)) |
874 ((16 - (TLB_SHIFT >> 1)) << 8));
c8d80cef 875 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
225b4376 876 offsetof(CPUState, tlb_table[0][0].addr_read));
c8d80cef
AJ
877 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
878 TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
3979144c
PB
879 /* Check alignment. */
880 if (s_bits)
881 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
882 0, addr_reg, (1 << s_bits) - 1);
811d4cf4
AZ
883# if TARGET_LONG_BITS == 64
884 /* XXX: possibly we could use a block data load or writeback in
885 * the first access. */
c8d80cef 886 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
225b4376 887 offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
c8d80cef
AJ
888 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
889 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
811d4cf4 890# endif
c8d80cef 891 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
225b4376 892 offsetof(CPUState, tlb_table[0][0].addend));
811d4cf4
AZ
893
894 switch (opc) {
895 case 0:
c8d80cef 896 tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
897 break;
898 case 0 | 4:
c8d80cef 899 tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
900 break;
901 case 1:
c8d80cef 902 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
903 break;
904 case 1 | 4:
c8d80cef 905 tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
906 break;
907 case 2:
908 default:
c8d80cef 909 tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
910 break;
911 case 3:
c8d80cef
AJ
912 tcg_out_ld32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
913 tcg_out_ld32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
811d4cf4
AZ
914 break;
915 }
916
917 label_ptr = (void *) s->code_ptr;
918 tcg_out_b(s, COND_EQ, 8);
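    /* On a TLB hit the COND_EQ loads above have already fetched the data;
     * this COND_EQ branch skips the slow-path helper call.  Its displacement
     * is corrected by the *label_ptr adjustment once the slow path has been
     * emitted. */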
811d4cf4 919
811d4cf4 920 /* TODO: move this code to where the constants pool will be */
c8d80cef 921 if (addr_reg != TCG_REG_R0) {
811d4cf4 922 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
923 TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
924 }
811d4cf4 925# if TARGET_LONG_BITS == 32
c8d80cef 926 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R1, 0, mem_index);
811d4cf4 927# else
c8d80cef 928 if (addr_reg2 != TCG_REG_R1) {
811d4cf4 929 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
930 TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
931 }
932 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
811d4cf4 933# endif
650bbb36 934 tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
811d4cf4
AZ
935 (tcg_target_long) s->code_ptr);
936
937 switch (opc) {
938 case 0 | 4:
939 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 940 TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(24));
811d4cf4 941 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 942 data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(24));
811d4cf4
AZ
943 break;
944 case 1 | 4:
945 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 946 TCG_REG_R0, 0, TCG_REG_R0, SHIFT_IMM_LSL(16));
811d4cf4 947 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 948 data_reg, 0, TCG_REG_R0, SHIFT_IMM_ASR(16));
811d4cf4
AZ
949 break;
950 case 0:
951 case 1:
952 case 2:
953 default:
c8d80cef 954 if (data_reg != TCG_REG_R0) {
811d4cf4 955 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
956 data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
957 }
811d4cf4
AZ
958 break;
959 case 3:
c8d80cef 960 if (data_reg != TCG_REG_R0) {
d0660ed4 961 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
962 data_reg, 0, TCG_REG_R0, SHIFT_IMM_LSL(0));
963 }
964 if (data_reg2 != TCG_REG_R1) {
811d4cf4 965 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
966 data_reg2, 0, TCG_REG_R1, SHIFT_IMM_LSL(0));
967 }
811d4cf4
AZ
968 break;
969 }
970
811d4cf4 971 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
379f6698
PB
972#else /* !CONFIG_SOFTMMU */
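    /* User-only memory accesses: guest addresses map to host addresses at a
     * fixed GUEST_BASE offset.  The loop below adds GUEST_BASE to the address
     * register one rotated 8-bit immediate at a time, accumulating the
     * result in r8. */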
973 if (GUEST_BASE) {
974 uint32_t offset = GUEST_BASE;
975 int i;
976 int rot;
977
978 while (offset) {
979 i = ctz32(offset) & ~1;
980 rot = ((32 - i) << 7) & 0xf00;
981
c8d80cef 982 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
379f6698 983 ((offset >> i) & 0xff) | rot);
c8d80cef 984 addr_reg = TCG_REG_R8;
379f6698
PB
985 offset &= ~(0xff << i);
986 }
987 }
811d4cf4
AZ
988 switch (opc) {
989 case 0:
990 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
991 break;
992 case 0 | 4:
993 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
994 break;
995 case 1:
996 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
997 break;
998 case 1 | 4:
999 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1000 break;
1001 case 2:
1002 default:
1003 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1004 break;
1005 case 3:
eae6ce52
AZ
1006 /* TODO: use a block load -
1007 * that requires checking whether data_reg2 > data_reg or the other way around */
419bafa5
AJ
1008 if (data_reg == addr_reg) {
1009 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1010 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1011 } else {
1012 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1013 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1014 }
811d4cf4
AZ
1015 break;
1016 }
1017#endif
1018}
1019
1020static inline void tcg_out_qemu_st(TCGContext *s, int cond,
1021 const TCGArg *args, int opc)
1022{
1023 int addr_reg, data_reg, data_reg2;
1024#ifdef CONFIG_SOFTMMU
1025 int mem_index, s_bits;
1026# if TARGET_LONG_BITS == 64
1027 int addr_reg2;
1028# endif
811d4cf4 1029 uint32_t *label_ptr;
811d4cf4
AZ
1030#endif
1031
1032 data_reg = *args++;
1033 if (opc == 3)
1034 data_reg2 = *args++;
1035 else
d89c682f 1036 data_reg2 = 0; /* suppress warning */
811d4cf4 1037 addr_reg = *args++;
811d4cf4 1038#ifdef CONFIG_SOFTMMU
aef3a282
AZ
1039# if TARGET_LONG_BITS == 64
1040 addr_reg2 = *args++;
1041# endif
811d4cf4
AZ
1042 mem_index = *args;
1043 s_bits = opc & 3;
1044
91a3c1b0 1045 /* Should generate something like the following:
3979144c 1046 * shr r8, addr_reg, #TARGET_PAGE_BITS
91a3c1b0 1047 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
3979144c 1048 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
91a3c1b0 1049 */
811d4cf4 1050 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
c8d80cef 1051 TCG_REG_R8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
811d4cf4 1052 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
c8d80cef
AJ
1053 TCG_REG_R0, TCG_REG_R8, CPU_TLB_SIZE - 1);
1054 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R0,
1055 TCG_AREG0, TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
91a3c1b0
AZ
1056 /* In the
1057 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
1058 * below, the offset is likely to exceed the 12-bit immediate range when
1059 * mem_index != 0 (and to fit otherwise), so emit an
1060 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
1061 * first.
1062 */
225b4376 1063 if (mem_index)
c8d80cef 1064 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R0, TCG_REG_R0,
225b4376
AZ
1065 (mem_index << (TLB_SHIFT & 1)) |
1066 ((16 - (TLB_SHIFT >> 1)) << 8));
c8d80cef 1067 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R0,
225b4376 1068 offsetof(CPUState, tlb_table[0][0].addr_write));
c8d80cef
AJ
1069 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R1,
1070 TCG_REG_R8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
3979144c
PB
1071 /* Check alignment. */
1072 if (s_bits)
1073 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1074 0, addr_reg, (1 << s_bits) - 1);
811d4cf4
AZ
1075# if TARGET_LONG_BITS == 64
1076 /* XXX: possibly we could use a block data load or writeback in
1077 * the first access. */
c8d80cef
AJ
1078 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
1079 offsetof(CPUState, tlb_table[0][0].addr_write) + 4);
1080 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1081 TCG_REG_R1, addr_reg2, SHIFT_IMM_LSL(0));
811d4cf4 1082# endif
c8d80cef 1083 tcg_out_ld32_12(s, COND_EQ, TCG_REG_R1, TCG_REG_R0,
225b4376 1084 offsetof(CPUState, tlb_table[0][0].addend));
811d4cf4
AZ
1085
1086 switch (opc) {
1087 case 0:
c8d80cef 1088 tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4 1089 break;
811d4cf4 1090 case 1:
c8d80cef 1091 tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
1092 break;
1093 case 2:
1094 default:
c8d80cef 1095 tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, TCG_REG_R1);
811d4cf4
AZ
1096 break;
1097 case 3:
c8d80cef
AJ
1098 tcg_out_st32_rwb(s, COND_EQ, data_reg, TCG_REG_R1, addr_reg);
1099 tcg_out_st32_12(s, COND_EQ, data_reg2, TCG_REG_R1, 4);
811d4cf4
AZ
1100 break;
1101 }
1102
1103 label_ptr = (void *) s->code_ptr;
1104 tcg_out_b(s, COND_EQ, 8);
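    /* As in tcg_out_qemu_ld: on a TLB hit this branch skips the slow-path
     * helper call; its displacement is patched via *label_ptr below. */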
811d4cf4 1105
811d4cf4 1106 /* TODO: move this code to where the constants pool will be */
c8d80cef 1107 if (addr_reg != TCG_REG_R0) {
811d4cf4 1108 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1109 TCG_REG_R0, 0, addr_reg, SHIFT_IMM_LSL(0));
1110 }
811d4cf4
AZ
1111# if TARGET_LONG_BITS == 32
1112 switch (opc) {
1113 case 0:
c8d80cef
AJ
1114 tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R1, data_reg, 0xff);
1115 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
811d4cf4
AZ
1116 break;
1117 case 1:
1118 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 1119 TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(16));
811d4cf4 1120 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1121 TCG_REG_R1, 0, TCG_REG_R1, SHIFT_IMM_LSR(16));
1122 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
811d4cf4
AZ
1123 break;
1124 case 2:
c8d80cef 1125 if (data_reg != TCG_REG_R1) {
811d4cf4 1126 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1127 TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
1128 }
1129 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R2, 0, mem_index);
811d4cf4
AZ
1130 break;
1131 case 3:
c8d80cef 1132 if (data_reg != TCG_REG_R1) {
811d4cf4 1133 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1134 TCG_REG_R1, 0, data_reg, SHIFT_IMM_LSL(0));
1135 }
1136 if (data_reg2 != TCG_REG_R2) {
811d4cf4 1137 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1138 TCG_REG_R2, 0, data_reg2, SHIFT_IMM_LSL(0));
1139 }
1140 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
811d4cf4
AZ
1141 break;
1142 }
1143# else
c8d80cef 1144 if (addr_reg2 != TCG_REG_R1) {
811d4cf4 1145 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1146 TCG_REG_R1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1147 }
811d4cf4
AZ
1148 switch (opc) {
1149 case 0:
c8d80cef
AJ
1150 tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_R2, data_reg, 0xff);
1151 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
811d4cf4
AZ
1152 break;
1153 case 1:
1154 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef 1155 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(16));
811d4cf4 1156 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1157 TCG_REG_R2, 0, TCG_REG_R2, SHIFT_IMM_LSR(16));
1158 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
811d4cf4
AZ
1159 break;
1160 case 2:
c8d80cef 1161 if (data_reg != TCG_REG_R2) {
811d4cf4 1162 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1163 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1164 }
1165 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R3, 0, mem_index);
811d4cf4
AZ
1166 break;
1167 case 3:
c8d80cef 1168 tcg_out_dat_imm(s, cond, ARITH_MOV, TCG_REG_R8, 0, mem_index);
91a3c1b0 1169 tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
c8d80cef 1170 if (data_reg != TCG_REG_R2) {
811d4cf4 1171 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1172 TCG_REG_R2, 0, data_reg, SHIFT_IMM_LSL(0));
1173 }
1174 if (data_reg2 != TCG_REG_R3) {
811d4cf4 1175 tcg_out_dat_reg(s, cond, ARITH_MOV,
c8d80cef
AJ
1176 TCG_REG_R3, 0, data_reg2, SHIFT_IMM_LSL(0));
1177 }
811d4cf4
AZ
1178 break;
1179 }
1180# endif
1181
204c1674 1182 tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
811d4cf4 1183 (tcg_target_long) s->code_ptr);
811d4cf4
AZ
1184# if TARGET_LONG_BITS == 64
1185 if (opc == 3)
c8d80cef 1186 tcg_out_dat_imm(s, cond, ARITH_ADD, TCG_REG_R13, TCG_REG_R13, 0x10);
811d4cf4
AZ
1187# endif
1188
811d4cf4 1189 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
379f6698
PB
1190#else /* !CONFIG_SOFTMMU */
1191 if (GUEST_BASE) {
1192 uint32_t offset = GUEST_BASE;
1193 int i;
1194 int rot;
1195
1196 while (offset) {
1197 i = ctz32(offset) & ~1;
1198 rot = ((32 - i) << 7) & 0xf00;
1199
c8d80cef 1200 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R8, addr_reg,
379f6698 1201 ((offset >> i) & 0xff) | rot);
c8d80cef 1202 addr_reg = TCG_REG_R8;
379f6698
PB
1203 offset &= ~(0xff << i);
1204 }
1205 }
811d4cf4
AZ
1206 switch (opc) {
1207 case 0:
1208 tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1209 break;
811d4cf4 1210 case 1:
f694a27e 1211 tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
811d4cf4
AZ
1212 break;
1213 case 2:
1214 default:
1215 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1216 break;
1217 case 3:
eae6ce52
AZ
1218 /* TODO: use a block store -
1219 * that requires checking whether data_reg2 > data_reg or the other way around */
811d4cf4
AZ
1220 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1221 tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1222 break;
1223 }
1224#endif
1225}
1226
811d4cf4
AZ
1227static uint8_t *tb_ret_addr;
1228
a9751609 1229static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
811d4cf4
AZ
1230 const TCGArg *args, const int *const_args)
1231{
1232 int c;
1233
1234 switch (opc) {
1235 case INDEX_op_exit_tb:
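        /* Return args[0] in r0 and jump back to tb_ret_addr.  Values that
         * fit in 8 bits use a mov immediate; anything larger is loaded
         * PC-relative from a constant word emitted right after the branch,
         * with the ldr offset patched once that position is known. */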
fe33867b
AZ
1236 {
1237 uint8_t *ld_ptr = s->code_ptr;
1238 if (args[0] >> 8)
c8d80cef 1239 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
fe33867b 1240 else
c8d80cef 1241 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
fe33867b
AZ
1242 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1243 if (args[0] >> 8) {
1244 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1245 tcg_out32(s, args[0]);
1246 }
1247 }
811d4cf4
AZ
1248 break;
1249 case INDEX_op_goto_tb:
1250 if (s->tb_jmp_offset) {
1251 /* Direct jump method */
fe33867b 1252#if defined(USE_DIRECT_JUMP)
811d4cf4
AZ
1253 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1254 tcg_out_b(s, COND_AL, 8);
1255#else
c8d80cef 1256 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
811d4cf4
AZ
1257 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1258 tcg_out32(s, 0);
1259#endif
1260 } else {
1261 /* Indirect jump method */
1262#if 1
1263 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1264 if (c > 0xfff || c < -0xfff) {
1265 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1266 (tcg_target_long) (s->tb_next + args[0]));
c8d80cef 1267 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
811d4cf4 1268 } else
c8d80cef 1269 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
811d4cf4 1270#else
c8d80cef
AJ
1271 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1272 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
811d4cf4
AZ
1273 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1274#endif
1275 }
1276 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1277 break;
1278 case INDEX_op_call:
1279 if (const_args[0])
1280 tcg_out_call(s, COND_AL, args[0]);
1281 else
1282 tcg_out_callr(s, COND_AL, args[0]);
1283 break;
1284 case INDEX_op_jmp:
1285 if (const_args[0])
1286 tcg_out_goto(s, COND_AL, args[0]);
1287 else
1288 tcg_out_bx(s, COND_AL, args[0]);
1289 break;
1290 case INDEX_op_br:
1291 tcg_out_goto_label(s, COND_AL, args[0]);
1292 break;
1293
1294 case INDEX_op_ld8u_i32:
1295 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1296 break;
1297 case INDEX_op_ld8s_i32:
1298 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1299 break;
1300 case INDEX_op_ld16u_i32:
1301 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1302 break;
1303 case INDEX_op_ld16s_i32:
1304 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1305 break;
1306 case INDEX_op_ld_i32:
1307 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1308 break;
1309 case INDEX_op_st8_i32:
f694a27e 1310 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
811d4cf4
AZ
1311 break;
1312 case INDEX_op_st16_i32:
f694a27e 1313 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
811d4cf4
AZ
1314 break;
1315 case INDEX_op_st_i32:
1316 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1317 break;
1318
1319 case INDEX_op_mov_i32:
1320 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1321 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1322 break;
1323 case INDEX_op_movi_i32:
1324 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1325 break;
1326 case INDEX_op_add_i32:
1327 c = ARITH_ADD;
1328 goto gen_arith;
1329 case INDEX_op_sub_i32:
1330 c = ARITH_SUB;
1331 goto gen_arith;
1332 case INDEX_op_and_i32:
1333 c = ARITH_AND;
1334 goto gen_arith;
932234f6
AJ
1335 case INDEX_op_andc_i32:
1336 c = ARITH_BIC;
1337 goto gen_arith;
811d4cf4
AZ
1338 case INDEX_op_or_i32:
1339 c = ARITH_ORR;
1340 goto gen_arith;
1341 case INDEX_op_xor_i32:
1342 c = ARITH_EOR;
1343 /* Fall through. */
1344 gen_arith:
94953e6d
LD
1345 if (const_args[2]) {
1346 int rot;
1347 rot = encode_imm(args[2]);
cb4e581f 1348 tcg_out_dat_imm(s, COND_AL, c,
94953e6d
LD
1349 args[0], args[1], rotl(args[2], rot) | (rot << 7));
1350 } else
cb4e581f
LD
1351 tcg_out_dat_reg(s, COND_AL, c,
1352 args[0], args[1], args[2], SHIFT_IMM_LSL(0));
811d4cf4
AZ
1353 break;
1354 case INDEX_op_add2_i32:
1355 tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1356 args[0], args[1], args[2], args[3],
1357 args[4], args[5], SHIFT_IMM_LSL(0));
1358 break;
1359 case INDEX_op_sub2_i32:
1360 tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1361 args[0], args[1], args[2], args[3],
1362 args[4], args[5], SHIFT_IMM_LSL(0));
1363 break;
650bbb36
AZ
1364 case INDEX_op_neg_i32:
1365 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1366 break;
f878d2d2
LD
1367 case INDEX_op_not_i32:
1368 tcg_out_dat_reg(s, COND_AL,
1369 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1370 break;
811d4cf4
AZ
1371 case INDEX_op_mul_i32:
1372 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1373 break;
1374 case INDEX_op_mulu2_i32:
1375 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1376 break;
811d4cf4
AZ
1377 /* XXX: Perhaps args[2] & 0x1f is wrong */
1378 case INDEX_op_shl_i32:
1379 c = const_args[2] ?
1380 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1381 goto gen_shift32;
1382 case INDEX_op_shr_i32:
1383 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1384 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1385 goto gen_shift32;
1386 case INDEX_op_sar_i32:
1387 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1388 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1389 /* Fall through. */
1390 gen_shift32:
1391 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1392 break;
1393
1394 case INDEX_op_brcond_i32:
023e77f8
AJ
1395 if (const_args[1]) {
1396 int rot;
1397 rot = encode_imm(args[1]);
c8d80cef
AJ
1398 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1399 args[0], rotl(args[1], rot) | (rot << 7));
023e77f8
AJ
1400 } else {
1401 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1402 args[0], args[1], SHIFT_IMM_LSL(0));
1403 }
811d4cf4
AZ
1404 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1405 break;
1406 case INDEX_op_brcond2_i32:
1407 /* The resulting conditions are:
1408 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1409 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1410 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1411 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1412 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1413 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1414 */
1415 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1416 args[1], args[3], SHIFT_IMM_LSL(0));
1417 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1418 args[0], args[2], SHIFT_IMM_LSL(0));
1419 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1420 break;
f72a6cd7 1421 case INDEX_op_setcond_i32:
023e77f8
AJ
1422 if (const_args[2]) {
1423 int rot;
1424 rot = encode_imm(args[2]);
c8d80cef
AJ
1425 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0,
1426 args[1], rotl(args[2], rot) | (rot << 7));
023e77f8
AJ
1427 } else {
1428 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1429 args[1], args[2], SHIFT_IMM_LSL(0));
1430 }
f72a6cd7
AJ
1431 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1432 ARITH_MOV, args[0], 0, 1);
1433 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1434 ARITH_MOV, args[0], 0, 0);
1435 break;
e0404769
AJ
1436 case INDEX_op_setcond2_i32:
1437 /* See brcond2_i32 comment */
1438 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1439 args[2], args[4], SHIFT_IMM_LSL(0));
1440 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1441 args[1], args[3], SHIFT_IMM_LSL(0));
1442 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
1443 ARITH_MOV, args[0], 0, 1);
1444 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
1445 ARITH_MOV, args[0], 0, 0);
b525f0a9 1446 break;
811d4cf4
AZ
1447
1448 case INDEX_op_qemu_ld8u:
1449 tcg_out_qemu_ld(s, COND_AL, args, 0);
1450 break;
1451 case INDEX_op_qemu_ld8s:
1452 tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
1453 break;
1454 case INDEX_op_qemu_ld16u:
1455 tcg_out_qemu_ld(s, COND_AL, args, 1);
1456 break;
1457 case INDEX_op_qemu_ld16s:
1458 tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
1459 break;
86feb1c8 1460 case INDEX_op_qemu_ld32:
811d4cf4
AZ
1461 tcg_out_qemu_ld(s, COND_AL, args, 2);
1462 break;
1463 case INDEX_op_qemu_ld64:
1464 tcg_out_qemu_ld(s, COND_AL, args, 3);
1465 break;
650bbb36 1466
811d4cf4
AZ
1467 case INDEX_op_qemu_st8:
1468 tcg_out_qemu_st(s, COND_AL, args, 0);
1469 break;
1470 case INDEX_op_qemu_st16:
1471 tcg_out_qemu_st(s, COND_AL, args, 1);
1472 break;
1473 case INDEX_op_qemu_st32:
1474 tcg_out_qemu_st(s, COND_AL, args, 2);
1475 break;
1476 case INDEX_op_qemu_st64:
1477 tcg_out_qemu_st(s, COND_AL, args, 3);
1478 break;
1479
1480 case INDEX_op_ext8s_i32:
8f7f749f 1481 if (use_armv6_instructions) {
ac34fb5c
AJ
1482 /* sxtb */
1483 tcg_out32(s, 0xe6af0070 | (args[0] << 12) | args[1]);
1484 } else {
1485 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1486 args[0], 0, args[1], SHIFT_IMM_LSL(24));
1487 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1488 args[0], 0, args[0], SHIFT_IMM_ASR(24));
1489 }
811d4cf4
AZ
1490 break;
1491 case INDEX_op_ext16s_i32:
8f7f749f 1492 if (use_armv6_instructions) {
ac34fb5c
AJ
1493 /* sxth */
1494 tcg_out32(s, 0xe6bf0070 | (args[0] << 12) | args[1]);
1495 } else {
1496 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1497 args[0], 0, args[1], SHIFT_IMM_LSL(16));
1498 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1499 args[0], 0, args[0], SHIFT_IMM_ASR(16));
1500 }
811d4cf4
AZ
1501 break;
1502
1503 default:
1504 tcg_abort();
1505 }
1506}
1507
1508static const TCGTargetOpDef arm_op_defs[] = {
1509 { INDEX_op_exit_tb, { } },
1510 { INDEX_op_goto_tb, { } },
1511 { INDEX_op_call, { "ri" } },
1512 { INDEX_op_jmp, { "ri" } },
1513 { INDEX_op_br, { } },
1514
1515 { INDEX_op_mov_i32, { "r", "r" } },
1516 { INDEX_op_movi_i32, { "r" } },
1517
1518 { INDEX_op_ld8u_i32, { "r", "r" } },
1519 { INDEX_op_ld8s_i32, { "r", "r" } },
1520 { INDEX_op_ld16u_i32, { "r", "r" } },
1521 { INDEX_op_ld16s_i32, { "r", "r" } },
1522 { INDEX_op_ld_i32, { "r", "r" } },
1523 { INDEX_op_st8_i32, { "r", "r" } },
1524 { INDEX_op_st16_i32, { "r", "r" } },
1525 { INDEX_op_st_i32, { "r", "r" } },
1526
1527 /* TODO: "r", "r", "ri" */
cb4e581f
LD
1528 { INDEX_op_add_i32, { "r", "r", "rI" } },
1529 { INDEX_op_sub_i32, { "r", "r", "rI" } },
811d4cf4
AZ
1530 { INDEX_op_mul_i32, { "r", "r", "r" } },
1531 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
cb4e581f 1532 { INDEX_op_and_i32, { "r", "r", "rI" } },
932234f6 1533 { INDEX_op_andc_i32, { "r", "r", "rI" } },
cb4e581f
LD
1534 { INDEX_op_or_i32, { "r", "r", "rI" } },
1535 { INDEX_op_xor_i32, { "r", "r", "rI" } },
650bbb36 1536 { INDEX_op_neg_i32, { "r", "r" } },
f878d2d2 1537 { INDEX_op_not_i32, { "r", "r" } },
811d4cf4
AZ
1538
1539 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1540 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1541 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1542
023e77f8
AJ
1543 { INDEX_op_brcond_i32, { "r", "rI" } },
1544 { INDEX_op_setcond_i32, { "r", "r", "rI" } },
811d4cf4
AZ
1545
1546 /* TODO: "r", "r", "r", "r", "ri", "ri" */
1547 { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
1548 { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
1549 { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
e0404769 1550 { INDEX_op_setcond2_i32, { "r", "r", "r", "r", "r" } },
811d4cf4 1551
26c5d372
AJ
1552#if TARGET_LONG_BITS == 32
1553 { INDEX_op_qemu_ld8u, { "r", "x" } },
1554 { INDEX_op_qemu_ld8s, { "r", "x" } },
1555 { INDEX_op_qemu_ld16u, { "r", "x" } },
1556 { INDEX_op_qemu_ld16s, { "r", "x" } },
1584c845 1557 { INDEX_op_qemu_ld32, { "r", "x" } },
26c5d372
AJ
1558 { INDEX_op_qemu_ld64, { "d", "r", "x" } },
1559
1560 { INDEX_op_qemu_st8, { "x", "x" } },
1561 { INDEX_op_qemu_st16, { "x", "x" } },
1562 { INDEX_op_qemu_st32, { "x", "x" } },
1563 { INDEX_op_qemu_st64, { "x", "D", "x" } },
1564#else
811d4cf4
AZ
1565 { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
1566 { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
1567 { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
1568 { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
86feb1c8 1569 { INDEX_op_qemu_ld32, { "r", "x", "X" } },
d0660ed4 1570 { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },
811d4cf4 1571
3979144c
PB
1572 { INDEX_op_qemu_st8, { "x", "x", "X" } },
1573 { INDEX_op_qemu_st16, { "x", "x", "X" } },
1574 { INDEX_op_qemu_st32, { "x", "x", "X" } },
1575 { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
26c5d372 1576#endif
811d4cf4
AZ
1577
1578 { INDEX_op_ext8s_i32, { "r", "r" } },
1579 { INDEX_op_ext16s_i32, { "r", "r" } },
1580
1581 { -1 },
1582};
1583
1584void tcg_target_init(TCGContext *s)
1585{
20cb400d 1586#if !defined(CONFIG_USER_ONLY)
811d4cf4
AZ
1587 /* fail safe */
1588 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1589 tcg_abort();
20cb400d 1590#endif
811d4cf4 1591
e4a7d5e8 1592 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
811d4cf4 1593 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
e4a7d5e8
AJ
1594 (1 << TCG_REG_R0) |
1595 (1 << TCG_REG_R1) |
1596 (1 << TCG_REG_R2) |
1597 (1 << TCG_REG_R3) |
1598 (1 << TCG_REG_R12) |
1599 (1 << TCG_REG_R14));
811d4cf4
AZ
1600
1601 tcg_regset_clear(s->reserved_regs);
811d4cf4
AZ
1602 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
1603 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
e4a7d5e8 1604 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
811d4cf4
AZ
1605
1606 tcg_add_target_add_op_defs(arm_op_defs);
1607}
1608
1609static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
1610 int arg1, tcg_target_long arg2)
1611{
1612 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
1613}
1614
1615static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
1616 int arg1, tcg_target_long arg2)
1617{
1618 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
1619}
1620
2d69f359 1621static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
811d4cf4
AZ
1622{
1623 if (val > 0)
1624 if (val < 0x100)
1625 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
1626 else
1627 tcg_abort();
1628 else if (val < 0) {
1629 if (val > -0x100)
1630 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
1631 else
1632 tcg_abort();
1633 }
1634}
1635
1636static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
1637{
1638 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
1639}
1640
1641static inline void tcg_out_movi(TCGContext *s, TCGType type,
1642 int ret, tcg_target_long arg)
1643{
1644 tcg_out_movi32(s, COND_AL, ret, arg);
1645}
1646
1647void tcg_target_qemu_prologue(TCGContext *s)
1648{
9e97d8e9
AJ
1649 /* There is no need to save r7, it is used to store the address
1650 of the env structure and is not modified by GCC. */
4e17eae9 1651
9e97d8e9
AJ
1652 /* stmdb sp!, { r4 - r6, r8 - r11, lr } */
1653 tcg_out32(s, (COND_AL << 28) | 0x092d4f70);
811d4cf4
AZ
1654
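    /* The caller passes the address of the translated code in r0 (the first
     * AAPCS argument register); branch there.  Everything after the bx is
     * the common epilogue that INDEX_op_exit_tb returns to, recorded as
     * tb_ret_addr. */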
1655 tcg_out_bx(s, COND_AL, TCG_REG_R0);
1656 tb_ret_addr = s->code_ptr;
1657
9e97d8e9
AJ
1658 /* ldmia sp!, { r4 - r6, r8 - r11, pc } */
1659 tcg_out32(s, (COND_AL << 28) | 0x08bd8f70);
811d4cf4 1660}