/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
25#ifndef NDEBUG
26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
811d4cf4
AZ
27 "%r0",
28 "%r1",
29 "%r2",
30 "%r3",
31 "%r4",
32 "%r5",
33 "%r6",
34 "%r7",
35 "%r8",
36 "%r9",
37 "%r10",
38 "%r11",
39 "%r12",
40 "%r13",
41 "%r14",
42};
d4a9eb1f 43#endif
811d4cf4 44
d4a9eb1f 45static const int tcg_target_reg_alloc_order[] = {
811d4cf4
AZ
46 TCG_REG_R0,
47 TCG_REG_R1,
48 TCG_REG_R2,
49 TCG_REG_R3,
50 TCG_REG_R4,
51 TCG_REG_R5,
52 TCG_REG_R6,
53 TCG_REG_R7,
54 TCG_REG_R8,
55 TCG_REG_R9,
56 TCG_REG_R10,
57 TCG_REG_R11,
58 TCG_REG_R12,
59 TCG_REG_R13,
60 TCG_REG_R14,
61};
62
d4a9eb1f 63static const int tcg_target_call_iarg_regs[4] = {
811d4cf4
AZ
64 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
65};
d4a9eb1f 66static const int tcg_target_call_oarg_regs[2] = {
811d4cf4
AZ
67 TCG_REG_R0, TCG_REG_R1
68};
69
650bbb36 70static void patch_reloc(uint8_t *code_ptr, int type,
811d4cf4
AZ
71 tcg_target_long value, tcg_target_long addend)
72{
73 switch (type) {
74 case R_ARM_ABS32:
75 *(uint32_t *) code_ptr = value;
76 break;
77
78 case R_ARM_CALL:
79 case R_ARM_JUMP24:
80 default:
81 tcg_abort();
82
83 case R_ARM_PC24:
eae6ce52 84 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
e936243a 85 (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
811d4cf4
AZ
86 break;
87 }
88}
89
/* Maximum number of registers used to pass function arguments.
   Per the ARM calling convention this is always r0-r3. */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
95
811d4cf4 96/* parse target specific constraints */
d4a9eb1f 97static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
811d4cf4
AZ
98{
99 const char *ct_str;
100
101 ct_str = *pct_str;
102 switch (ct_str[0]) {
cb4e581f
LD
103 case 'I':
104 ct->ct |= TCG_CT_CONST_ARM;
105 break;
106
811d4cf4
AZ
107 case 'r':
108#ifndef CONFIG_SOFTMMU
109 case 'd':
110 case 'D':
111 case 'x':
112 case 'X':
113#endif
114 ct->ct |= TCG_CT_REG;
115 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
116 break;
117
118#ifdef CONFIG_SOFTMMU
d0660ed4 119 /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
811d4cf4
AZ
120 case 'x':
121 ct->ct |= TCG_CT_REG;
122 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
811d4cf4
AZ
123 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
124 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
811d4cf4
AZ
125 break;
126
d0660ed4
AZ
127 /* qemu_ld64 data_reg */
128 case 'd':
129 ct->ct |= TCG_CT_REG;
130 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
131 /* r1 is still needed to load data_reg2, so don't use it. */
132 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
133 break;
134
811d4cf4
AZ
135 /* qemu_ld/st64 data_reg2 */
136 case 'D':
137 ct->ct |= TCG_CT_REG;
138 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
139 /* r0, r1 and optionally r2 will be overwritten by the address
140 * and the low word of data, so don't use these. */
141 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
142 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
143# if TARGET_LONG_BITS == 64
144 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
145# endif
146 break;
147
148# if TARGET_LONG_BITS == 64
149 /* qemu_ld/st addr_reg2 */
150 case 'X':
151 ct->ct |= TCG_CT_REG;
152 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
153 /* r0 will be overwritten by the low word of base, so don't use it. */
154 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
811d4cf4 155 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
811d4cf4
AZ
156 break;
157# endif
158#endif
159
160 case '1':
161 ct->ct |= TCG_CT_REG;
162 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
163 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
164 break;
165
166 case '2':
167 ct->ct |= TCG_CT_REG;
168 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
169 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
170 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
171 break;
172
173 default:
174 return -1;
175 }
176 ct_str++;
177 *pct_str = ct_str;
178
179 return 0;
180}
181
/* Rotate a 32-bit value left by n bits (callers pass 0 < n < 32). */
static inline uint32_t rotl(uint32_t val, int n)
{
    uint32_t hi = val << n;
    uint32_t lo = val >> (32 - n);
    return hi | lo;
}
186
/* ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return the left-rotation count that brings imm into the low 8 bits
 * (0 when no rotation is needed), or -1 if imm cannot be encoded.
 * Only the even-shift fast path and rotations 2/4/6 are tried, which
 * covers the callers' needs.
 *
 * Fix: the original used `shift` without declaring it, which does not
 * compile; declare it as a local int.
 */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
cb4e581f
LD
208
209static inline int check_fit_imm(uint32_t imm)
210{
94953e6d 211 return encode_imm(imm) >= 0;
cb4e581f
LD
212}
213
811d4cf4
AZ
214/* Test if a constant matches the constraint.
215 * TODO: define constraints for:
216 *
217 * ldr/str offset: between -0xfff and 0xfff
218 * ldrh/strh offset: between -0xff and 0xff
219 * mov operand2: values represented with x << (2 * y), x < 0x100
220 * add, sub, eor...: ditto
221 */
222static inline int tcg_target_const_match(tcg_target_long val,
223 const TCGArgConstraint *arg_ct)
224{
225 int ct;
226 ct = arg_ct->ct;
227 if (ct & TCG_CT_CONST)
228 return 1;
cb4e581f
LD
229 else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
230 return 1;
811d4cf4
AZ
231 else
232 return 0;
233}
234
/* ARM data-processing opcodes (instruction bits 24:21). */
enum arm_data_opc_e {
    ARITH_AND = 0x0,
    ARITH_EOR = 0x1,
    ARITH_SUB = 0x2,
    ARITH_RSB = 0x3,
    ARITH_ADD = 0x4,
    ARITH_ADC = 0x5,
    ARITH_SBC = 0x6,
    ARITH_RSC = 0x7,
    ARITH_TST = 0x8,    /* 0x9 (TEQ) is unused here */
    ARITH_CMP = 0xa,
    ARITH_CMN = 0xb,
    ARITH_ORR = 0xc,
    ARITH_MOV = 0xd,
    ARITH_BIC = 0xe,
    ARITH_MVN = 0xf,
};
252
/* Set the S (update-CPSR) bit, bit 20, for the compare/test opcodes,
 * which exist only in their flag-setting form.
 * Fix: parenthesize the macro argument for macro hygiene, so an
 * expression argument cannot change the comparison's meaning. */
#define TO_CPSR(opc) \
    ((((opc) == ARITH_CMP) || ((opc) == ARITH_CMN) || \
      ((opc) == ARITH_TST)) << 20)

/* Operand-2 shifter encodings: immediate shift amount (bits 11:7)
 * or shift-by-register rs (bits 11:8), with the shift-type bits. */
#define SHIFT_IMM_LSL(im)	(((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)	(((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)	(((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)	(((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)	(((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)	(((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)	(((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)	(((rs) << 8) | 0x70)
264
/* ARM condition codes (instruction bits 31:28). */
enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,	/* Unsigned greater or equal */
    COND_CC = 0x3,	/* Unsigned less than */
    COND_MI = 0x4,	/* Negative */
    COND_PL = 0x5,	/* Zero or greater */
    COND_VS = 0x6,	/* Overflow */
    COND_VC = 0x7,	/* No overflow */
    COND_HI = 0x8,	/* Unsigned greater than */
    COND_LS = 0x9,	/* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,	/* Always */
};
282
283static const uint8_t tcg_cond_to_arm_cond[10] = {
284 [TCG_COND_EQ] = COND_EQ,
285 [TCG_COND_NE] = COND_NE,
286 [TCG_COND_LT] = COND_LT,
287 [TCG_COND_GE] = COND_GE,
288 [TCG_COND_LE] = COND_LE,
289 [TCG_COND_GT] = COND_GT,
290 /* unsigned */
291 [TCG_COND_LTU] = COND_CC,
292 [TCG_COND_GEU] = COND_CS,
293 [TCG_COND_LEU] = COND_LS,
294 [TCG_COND_GTU] = COND_HI,
295};
296
297static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
298{
299 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
300}
301
302static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
303{
304 tcg_out32(s, (cond << 28) | 0x0a000000 |
305 (((offset - 8) >> 2) & 0x00ffffff));
306}
307
e936243a
AZ
308static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
309{
e2542fe2 310#ifdef HOST_WORDS_BIGENDIAN
e936243a
AZ
311 tcg_out8(s, (cond << 4) | 0x0a);
312 s->code_ptr += 3;
313#else
314 s->code_ptr += 3;
315 tcg_out8(s, (cond << 4) | 0x0a);
316#endif
317}
318
811d4cf4
AZ
319static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
320{
321 tcg_out32(s, (cond << 28) | 0x0b000000 |
322 (((offset - 8) >> 2) & 0x00ffffff));
323}
324
325static inline void tcg_out_dat_reg(TCGContext *s,
326 int cond, int opc, int rd, int rn, int rm, int shift)
327{
328 tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
329 (rn << 16) | (rd << 12) | shift | rm);
330}
331
332static inline void tcg_out_dat_reg2(TCGContext *s,
333 int cond, int opc0, int opc1, int rd0, int rd1,
334 int rn0, int rn1, int rm0, int rm1, int shift)
335{
0c9c3a9e
AZ
336 if (rd0 == rn1 || rd0 == rm1) {
337 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
338 (rn0 << 16) | (8 << 12) | shift | rm0);
339 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
340 (rn1 << 16) | (rd1 << 12) | shift | rm1);
341 tcg_out_dat_reg(s, cond, ARITH_MOV,
342 rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
343 } else {
344 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
345 (rn0 << 16) | (rd0 << 12) | shift | rm0);
346 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
347 (rn1 << 16) | (rd1 << 12) | shift | rm1);
348 }
811d4cf4
AZ
349}
350
351static inline void tcg_out_dat_imm(TCGContext *s,
352 int cond, int opc, int rd, int rn, int im)
353{
3979144c 354 tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
811d4cf4
AZ
355 (rn << 16) | (rd << 12) | im);
356}
357
358static inline void tcg_out_movi32(TCGContext *s,
359 int cond, int rd, int32_t arg)
360{
361 int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
362
363 /* TODO: This is very suboptimal, we can easily have a constant
364 * pool somewhere after all the instructions. */
365
366 if (arg < 0 && arg > -0x100)
367 return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
368
369 if (offset < 0x100 && offset > -0x100)
370 return offset >= 0 ?
371 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
372 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
373
cb4e581f
LD
374#ifdef __ARM_ARCH_7A__
375 /* use movw/movt */
376 /* movw */
377 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
378 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
379 if (arg & 0xffff0000)
380 /* movt */
381 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
382 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
383#else
811d4cf4
AZ
384 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
385 if (arg & 0x0000ff00)
386 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
387 ((arg >> 8) & 0xff) | 0xc00);
388 if (arg & 0x00ff0000)
389 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
390 ((arg >> 16) & 0xff) | 0x800);
391 if (arg & 0xff000000)
392 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
393 ((arg >> 24) & 0xff) | 0x400);
cb4e581f 394#endif
811d4cf4
AZ
395}
396
397static inline void tcg_out_mul32(TCGContext *s,
398 int cond, int rd, int rs, int rm)
399{
400 if (rd != rm)
401 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
402 (rs << 8) | 0x90 | rm);
403 else if (rd != rs)
404 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
405 (rm << 8) | 0x90 | rs);
406 else {
407 tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
408 (rs << 8) | 0x90 | rm);
409 tcg_out_dat_reg(s, cond, ARITH_MOV,
410 rd, 0, 8, SHIFT_IMM_LSL(0));
411 }
412}
413
414static inline void tcg_out_umull32(TCGContext *s,
415 int cond, int rd0, int rd1, int rs, int rm)
416{
417 if (rd0 != rm && rd1 != rm)
418 tcg_out32(s, (cond << 28) | 0x800090 |
419 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
420 else if (rd0 != rs && rd1 != rs)
421 tcg_out32(s, (cond << 28) | 0x800090 |
422 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
423 else {
424 tcg_out_dat_reg(s, cond, ARITH_MOV,
425 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
426 tcg_out32(s, (cond << 28) | 0x800098 |
427 (rd1 << 16) | (rd0 << 12) | (rs << 8));
428 }
429}
430
431static inline void tcg_out_smull32(TCGContext *s,
432 int cond, int rd0, int rd1, int rs, int rm)
433{
434 if (rd0 != rm && rd1 != rm)
435 tcg_out32(s, (cond << 28) | 0xc00090 |
436 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
437 else if (rd0 != rs && rd1 != rs)
438 tcg_out32(s, (cond << 28) | 0xc00090 |
439 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
440 else {
441 tcg_out_dat_reg(s, cond, ARITH_MOV,
442 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
443 tcg_out32(s, (cond << 28) | 0xc00098 |
444 (rd1 << 16) | (rd0 << 12) | (rs << 8));
445 }
446}
447
448static inline void tcg_out_ld32_12(TCGContext *s, int cond,
449 int rd, int rn, tcg_target_long im)
450{
451 if (im >= 0)
452 tcg_out32(s, (cond << 28) | 0x05900000 |
453 (rn << 16) | (rd << 12) | (im & 0xfff));
454 else
455 tcg_out32(s, (cond << 28) | 0x05100000 |
456 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
457}
458
459static inline void tcg_out_st32_12(TCGContext *s, int cond,
460 int rd, int rn, tcg_target_long im)
461{
462 if (im >= 0)
463 tcg_out32(s, (cond << 28) | 0x05800000 |
464 (rn << 16) | (rd << 12) | (im & 0xfff));
465 else
466 tcg_out32(s, (cond << 28) | 0x05000000 |
467 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
468}
469
470static inline void tcg_out_ld32_r(TCGContext *s, int cond,
471 int rd, int rn, int rm)
472{
473 tcg_out32(s, (cond << 28) | 0x07900000 |
474 (rn << 16) | (rd << 12) | rm);
475}
476
477static inline void tcg_out_st32_r(TCGContext *s, int cond,
478 int rd, int rn, int rm)
479{
480 tcg_out32(s, (cond << 28) | 0x07800000 |
481 (rn << 16) | (rd << 12) | rm);
482}
483
3979144c
PB
484/* Register pre-increment with base writeback. */
485static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
486 int rd, int rn, int rm)
487{
488 tcg_out32(s, (cond << 28) | 0x07b00000 |
489 (rn << 16) | (rd << 12) | rm);
490}
491
492static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
493 int rd, int rn, int rm)
494{
495 tcg_out32(s, (cond << 28) | 0x07a00000 |
496 (rn << 16) | (rd << 12) | rm);
497}
498
811d4cf4
AZ
499static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
500 int rd, int rn, tcg_target_long im)
501{
502 if (im >= 0)
503 tcg_out32(s, (cond << 28) | 0x01d000b0 |
504 (rn << 16) | (rd << 12) |
505 ((im & 0xf0) << 4) | (im & 0xf));
506 else
507 tcg_out32(s, (cond << 28) | 0x015000b0 |
508 (rn << 16) | (rd << 12) |
509 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
510}
511
512static inline void tcg_out_st16u_8(TCGContext *s, int cond,
513 int rd, int rn, tcg_target_long im)
514{
515 if (im >= 0)
516 tcg_out32(s, (cond << 28) | 0x01c000b0 |
517 (rn << 16) | (rd << 12) |
518 ((im & 0xf0) << 4) | (im & 0xf));
519 else
520 tcg_out32(s, (cond << 28) | 0x014000b0 |
521 (rn << 16) | (rd << 12) |
522 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
523}
524
525static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
526 int rd, int rn, int rm)
527{
528 tcg_out32(s, (cond << 28) | 0x019000b0 |
529 (rn << 16) | (rd << 12) | rm);
530}
531
532static inline void tcg_out_st16u_r(TCGContext *s, int cond,
533 int rd, int rn, int rm)
534{
535 tcg_out32(s, (cond << 28) | 0x018000b0 |
536 (rn << 16) | (rd << 12) | rm);
537}
538
539static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
540 int rd, int rn, tcg_target_long im)
541{
542 if (im >= 0)
543 tcg_out32(s, (cond << 28) | 0x01d000f0 |
544 (rn << 16) | (rd << 12) |
545 ((im & 0xf0) << 4) | (im & 0xf));
546 else
547 tcg_out32(s, (cond << 28) | 0x015000f0 |
548 (rn << 16) | (rd << 12) |
549 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
550}
551
552static inline void tcg_out_st16s_8(TCGContext *s, int cond,
553 int rd, int rn, tcg_target_long im)
554{
555 if (im >= 0)
556 tcg_out32(s, (cond << 28) | 0x01c000f0 |
557 (rn << 16) | (rd << 12) |
558 ((im & 0xf0) << 4) | (im & 0xf));
559 else
560 tcg_out32(s, (cond << 28) | 0x014000f0 |
561 (rn << 16) | (rd << 12) |
562 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
563}
564
565static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
566 int rd, int rn, int rm)
567{
568 tcg_out32(s, (cond << 28) | 0x019000f0 |
569 (rn << 16) | (rd << 12) | rm);
570}
571
572static inline void tcg_out_st16s_r(TCGContext *s, int cond,
573 int rd, int rn, int rm)
574{
575 tcg_out32(s, (cond << 28) | 0x018000f0 |
576 (rn << 16) | (rd << 12) | rm);
577}
578
579static inline void tcg_out_ld8_12(TCGContext *s, int cond,
580 int rd, int rn, tcg_target_long im)
581{
582 if (im >= 0)
583 tcg_out32(s, (cond << 28) | 0x05d00000 |
584 (rn << 16) | (rd << 12) | (im & 0xfff));
585 else
586 tcg_out32(s, (cond << 28) | 0x05500000 |
587 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
588}
589
590static inline void tcg_out_st8_12(TCGContext *s, int cond,
591 int rd, int rn, tcg_target_long im)
592{
593 if (im >= 0)
594 tcg_out32(s, (cond << 28) | 0x05c00000 |
595 (rn << 16) | (rd << 12) | (im & 0xfff));
596 else
597 tcg_out32(s, (cond << 28) | 0x05400000 |
598 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
599}
600
601static inline void tcg_out_ld8_r(TCGContext *s, int cond,
602 int rd, int rn, int rm)
603{
604 tcg_out32(s, (cond << 28) | 0x07d00000 |
605 (rn << 16) | (rd << 12) | rm);
606}
607
608static inline void tcg_out_st8_r(TCGContext *s, int cond,
609 int rd, int rn, int rm)
610{
611 tcg_out32(s, (cond << 28) | 0x07c00000 |
612 (rn << 16) | (rd << 12) | rm);
613}
614
615static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
616 int rd, int rn, tcg_target_long im)
617{
618 if (im >= 0)
619 tcg_out32(s, (cond << 28) | 0x01d000d0 |
620 (rn << 16) | (rd << 12) |
621 ((im & 0xf0) << 4) | (im & 0xf));
622 else
623 tcg_out32(s, (cond << 28) | 0x015000d0 |
624 (rn << 16) | (rd << 12) |
625 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
626}
627
628static inline void tcg_out_st8s_8(TCGContext *s, int cond,
629 int rd, int rn, tcg_target_long im)
630{
631 if (im >= 0)
632 tcg_out32(s, (cond << 28) | 0x01c000d0 |
633 (rn << 16) | (rd << 12) |
634 ((im & 0xf0) << 4) | (im & 0xf));
635 else
636 tcg_out32(s, (cond << 28) | 0x014000d0 |
637 (rn << 16) | (rd << 12) |
638 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
639}
640
641static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
642 int rd, int rn, int rm)
643{
204c1674 644 tcg_out32(s, (cond << 28) | 0x019000d0 |
811d4cf4
AZ
645 (rn << 16) | (rd << 12) | rm);
646}
647
648static inline void tcg_out_st8s_r(TCGContext *s, int cond,
649 int rd, int rn, int rm)
650{
204c1674 651 tcg_out32(s, (cond << 28) | 0x018000d0 |
811d4cf4
AZ
652 (rn << 16) | (rd << 12) | rm);
653}
654
655static inline void tcg_out_ld32u(TCGContext *s, int cond,
656 int rd, int rn, int32_t offset)
657{
658 if (offset > 0xfff || offset < -0xfff) {
659 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
660 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
661 } else
662 tcg_out_ld32_12(s, cond, rd, rn, offset);
663}
664
665static inline void tcg_out_st32(TCGContext *s, int cond,
666 int rd, int rn, int32_t offset)
667{
668 if (offset > 0xfff || offset < -0xfff) {
669 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
670 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
671 } else
672 tcg_out_st32_12(s, cond, rd, rn, offset);
673}
674
675static inline void tcg_out_ld16u(TCGContext *s, int cond,
676 int rd, int rn, int32_t offset)
677{
678 if (offset > 0xff || offset < -0xff) {
679 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
680 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
681 } else
682 tcg_out_ld16u_8(s, cond, rd, rn, offset);
683}
684
685static inline void tcg_out_ld16s(TCGContext *s, int cond,
686 int rd, int rn, int32_t offset)
687{
688 if (offset > 0xff || offset < -0xff) {
689 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
690 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
691 } else
692 tcg_out_ld16s_8(s, cond, rd, rn, offset);
693}
694
695static inline void tcg_out_st16u(TCGContext *s, int cond,
696 int rd, int rn, int32_t offset)
697{
698 if (offset > 0xff || offset < -0xff) {
699 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
700 tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
701 } else
702 tcg_out_st16u_8(s, cond, rd, rn, offset);
703}
704
705static inline void tcg_out_ld8u(TCGContext *s, int cond,
706 int rd, int rn, int32_t offset)
707{
708 if (offset > 0xfff || offset < -0xfff) {
709 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
710 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
711 } else
712 tcg_out_ld8_12(s, cond, rd, rn, offset);
713}
714
715static inline void tcg_out_ld8s(TCGContext *s, int cond,
716 int rd, int rn, int32_t offset)
717{
718 if (offset > 0xff || offset < -0xff) {
719 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
720 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
721 } else
722 tcg_out_ld8s_8(s, cond, rd, rn, offset);
723}
724
725static inline void tcg_out_st8u(TCGContext *s, int cond,
726 int rd, int rn, int32_t offset)
727{
728 if (offset > 0xfff || offset < -0xfff) {
729 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
730 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
731 } else
732 tcg_out_st8_12(s, cond, rd, rn, offset);
733}
734
735static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
736{
737 int32_t val;
738
739 val = addr - (tcg_target_long) s->code_ptr;
740 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
741 tcg_out_b(s, cond, val);
742 else {
743#if 1
744 tcg_abort();
745#else
746 if (cond == COND_AL) {
747 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
748 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
749 } else {
750 tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
751 tcg_out_dat_reg(s, cond, ARITH_ADD,
752 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
753 }
754#endif
755 }
756}
757
758static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
759{
760 int32_t val;
761
762#ifdef SAVE_LR
763 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
764#endif
765
766 val = addr - (tcg_target_long) s->code_ptr;
767 if (val < 0x01fffffd && val > -0x01fffffd)
768 tcg_out_bl(s, cond, val);
769 else {
770#if 1
771 tcg_abort();
772#else
773 if (cond == COND_AL) {
774 tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
775 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
776 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
777 } else {
778 tcg_out_movi32(s, cond, TCG_REG_R9, addr);
779 tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
780 tcg_out_bx(s, cond, TCG_REG_R9);
781 }
782#endif
783 }
784
785#ifdef SAVE_LR
786 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
787#endif
788}
789
790static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
791{
792#ifdef SAVE_LR
793 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
794#endif
795 /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
796 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
797 tcg_out_bx(s, cond, arg);
798#ifdef SAVE_LR
799 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
800#endif
801}
802
803static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
804{
805 TCGLabel *l = &s->labels[label_index];
806
807 if (l->has_value)
808 tcg_out_goto(s, cond, l->u.value);
809 else if (cond == COND_AL) {
810 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
811 tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
812 s->code_ptr += 4;
813 } else {
814 /* Probably this should be preferred even for COND_AL... */
815 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
e936243a 816 tcg_out_b_noaddr(s, cond);
811d4cf4
AZ
817 }
818}
819
820static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
821 void *helper_div, void *helper_rem, int shift)
822{
823 int div_reg = args[0];
824 int rem_reg = args[1];
825
826 /* stmdb sp!, { r0 - r3, ip, lr } */
827 /* (Note that we need an even number of registers as per EABI) */
828 tcg_out32(s, (cond << 28) | 0x092d500f);
829
830 tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
831 tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
832 tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
833 tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
834
835 tcg_out_call(s, cond, (uint32_t) helper_div);
836 tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));
837
838 /* ldmia sp, { r0 - r3, fp, lr } */
839 tcg_out32(s, (cond << 28) | 0x089d500f);
840
841 tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
842 tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
843 tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
844 tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
845
846 tcg_out_call(s, cond, (uint32_t) helper_rem);
847
848 tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
849 tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));
850
851 /* ldr r0, [sp], #4 */
852 if (rem_reg != 0 && div_reg != 0)
853 tcg_out32(s, (cond << 28) | 0x04bd0004);
854 /* ldr r1, [sp], #4 */
855 if (rem_reg != 1 && div_reg != 1)
856 tcg_out32(s, (cond << 28) | 0x04bd1004);
857 /* ldr r2, [sp], #4 */
858 if (rem_reg != 2 && div_reg != 2)
859 tcg_out32(s, (cond << 28) | 0x04bd2004);
860 /* ldr r3, [sp], #4 */
861 if (rem_reg != 3 && div_reg != 3)
862 tcg_out32(s, (cond << 28) | 0x04bd3004);
863 /* ldr ip, [sp], #4 */
864 if (rem_reg != 12 && div_reg != 12)
865 tcg_out32(s, (cond << 28) | 0x04bdc004);
866 /* ldr lr, [sp], #4 */
867 if (rem_reg != 14 && div_reg != 14)
868 tcg_out32(s, (cond << 28) | 0x04bde004);
869}
870
#ifdef CONFIG_SOFTMMU

#include "../../softmmu_defs.h"

/* Slow-path MMU access helpers, indexed by log2(access size). */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* log2 of the per-MMU-mode TLB table size in bytes. */
#define TLB_SHIFT	(CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
891
811d4cf4
AZ
892static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
893 const TCGArg *args, int opc)
894{
895 int addr_reg, data_reg, data_reg2;
896#ifdef CONFIG_SOFTMMU
897 int mem_index, s_bits;
898# if TARGET_LONG_BITS == 64
899 int addr_reg2;
900# endif
811d4cf4 901 uint32_t *label_ptr;
811d4cf4
AZ
902#endif
903
904 data_reg = *args++;
905 if (opc == 3)
906 data_reg2 = *args++;
907 else
908 data_reg2 = 0; /* surpress warning */
909 addr_reg = *args++;
811d4cf4 910#ifdef CONFIG_SOFTMMU
aef3a282
AZ
911# if TARGET_LONG_BITS == 64
912 addr_reg2 = *args++;
913# endif
811d4cf4
AZ
914 mem_index = *args;
915 s_bits = opc & 3;
916
91a3c1b0 917 /* Should generate something like the following:
3979144c 918 * shr r8, addr_reg, #TARGET_PAGE_BITS
91a3c1b0 919 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
3979144c 920 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
91a3c1b0
AZ
921 */
922# if CPU_TLB_BITS > 8
923# error
924# endif
811d4cf4 925 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
3979144c 926 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
811d4cf4
AZ
927 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
928 0, 8, CPU_TLB_SIZE - 1);
929 tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
930 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
91a3c1b0
AZ
931 /* In the
932 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
933 * below, the offset is likely to exceed 12 bits if mem_index != 0 and
934 * not exceed otherwise, so use an
935 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
936 * before.
937 */
225b4376
AZ
938 if (mem_index)
939 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
940 (mem_index << (TLB_SHIFT & 1)) |
941 ((16 - (TLB_SHIFT >> 1)) << 8));
811d4cf4 942 tcg_out_ld32_12(s, COND_AL, 1, 0,
225b4376 943 offsetof(CPUState, tlb_table[0][0].addr_read));
811d4cf4
AZ
944 tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
945 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
3979144c
PB
946 /* Check alignment. */
947 if (s_bits)
948 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
949 0, addr_reg, (1 << s_bits) - 1);
811d4cf4
AZ
950# if TARGET_LONG_BITS == 64
951 /* XXX: possibly we could use a block data load or writeback in
952 * the first access. */
953 tcg_out_ld32_12(s, COND_EQ, 1, 0,
225b4376 954 offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
811d4cf4
AZ
955 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
956 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
957# endif
958 tcg_out_ld32_12(s, COND_EQ, 1, 0,
225b4376 959 offsetof(CPUState, tlb_table[0][0].addend));
811d4cf4
AZ
960
961 switch (opc) {
962 case 0:
963 tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
964 break;
965 case 0 | 4:
966 tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
967 break;
968 case 1:
969 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
970 break;
971 case 1 | 4:
972 tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
973 break;
974 case 2:
975 default:
976 tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
977 break;
978 case 3:
3979144c 979 tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
811d4cf4
AZ
980 tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
981 break;
982 }
983
984 label_ptr = (void *) s->code_ptr;
985 tcg_out_b(s, COND_EQ, 8);
811d4cf4
AZ
986
987# ifdef SAVE_LR
988 tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
989# endif
990
991 /* TODO: move this code to where the constants pool will be */
992 if (addr_reg)
993 tcg_out_dat_reg(s, cond, ARITH_MOV,
994 0, 0, addr_reg, SHIFT_IMM_LSL(0));
995# if TARGET_LONG_BITS == 32
996 tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
997# else
998 if (addr_reg2 != 1)
999 tcg_out_dat_reg(s, cond, ARITH_MOV,
1000 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1001 tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1002# endif
650bbb36 1003 tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
811d4cf4
AZ
1004 (tcg_target_long) s->code_ptr);
1005
1006 switch (opc) {
1007 case 0 | 4:
1008 tcg_out_dat_reg(s, cond, ARITH_MOV,
1009 0, 0, 0, SHIFT_IMM_LSL(24));
1010 tcg_out_dat_reg(s, cond, ARITH_MOV,
1011 data_reg, 0, 0, SHIFT_IMM_ASR(24));
1012 break;
1013 case 1 | 4:
1014 tcg_out_dat_reg(s, cond, ARITH_MOV,
1015 0, 0, 0, SHIFT_IMM_LSL(16));
1016 tcg_out_dat_reg(s, cond, ARITH_MOV,
1017 data_reg, 0, 0, SHIFT_IMM_ASR(16));
1018 break;
1019 case 0:
1020 case 1:
1021 case 2:
1022 default:
1023 if (data_reg)
1024 tcg_out_dat_reg(s, cond, ARITH_MOV,
1025 data_reg, 0, 0, SHIFT_IMM_LSL(0));
1026 break;
1027 case 3:
d0660ed4
AZ
1028 if (data_reg != 0)
1029 tcg_out_dat_reg(s, cond, ARITH_MOV,
1030 data_reg, 0, 0, SHIFT_IMM_LSL(0));
811d4cf4
AZ
1031 if (data_reg2 != 1)
1032 tcg_out_dat_reg(s, cond, ARITH_MOV,
1033 data_reg2, 0, 1, SHIFT_IMM_LSL(0));
811d4cf4
AZ
1034 break;
1035 }
1036
1037# ifdef SAVE_LR
1038 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
1039# endif
1040
811d4cf4 1041 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
379f6698
PB
1042#else /* !CONFIG_SOFTMMU */
1043 if (GUEST_BASE) {
1044 uint32_t offset = GUEST_BASE;
1045 int i;
1046 int rot;
1047
1048 while (offset) {
1049 i = ctz32(offset) & ~1;
1050 rot = ((32 - i) << 7) & 0xf00;
1051
1052 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
1053 ((offset >> i) & 0xff) | rot);
1054 addr_reg = 8;
1055 offset &= ~(0xff << i);
1056 }
1057 }
811d4cf4
AZ
1058 switch (opc) {
1059 case 0:
1060 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1061 break;
1062 case 0 | 4:
1063 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1064 break;
1065 case 1:
1066 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1067 break;
1068 case 1 | 4:
1069 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1070 break;
1071 case 2:
1072 default:
1073 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1074 break;
1075 case 3:
eae6ce52
AZ
1076 /* TODO: use block load -
1077 * check that data_reg2 > data_reg or the other way */
419bafa5
AJ
1078 if (data_reg == addr_reg) {
1079 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1080 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1081 } else {
1082 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1083 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1084 }
811d4cf4
AZ
1085 break;
1086 }
1087#endif
1088}
1089
/* Emit target code for a qemu_st (guest memory store) TCG opcode.
 *
 * args: data_reg [, data_reg2 if opc == 3 (64-bit)], addr_reg
 *       [, addr_reg2 if TARGET_LONG_BITS == 64], mem_index.
 * opc & 3 is the log2 of the access size (0 = byte .. 3 = 64-bit pair).
 *
 * With CONFIG_SOFTMMU this emits an inline TLB lookup (fast path) with a
 * conditional fall-through store, followed by a slow path that marshals
 * the address/data/mem_index into r0-r3 (the AAPCS argument registers)
 * and calls the qemu_st_helpers[] C helper.  Registers r0, r1, r2, r3
 * and r8 are used as fixed scratch registers throughout.
 */
static inline void tcg_out_qemu_st(TCGContext *s, int cond,
                const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2;
#ifdef CONFIG_SOFTMMU
    int mem_index, s_bits;
# if TARGET_LONG_BITS == 64
    int addr_reg2;
# endif
    /* Location of the COND_EQ branch over the slow path, patched at the end. */
    uint32_t *label_ptr;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* surpress warning */
    addr_reg = *args++;
#ifdef CONFIG_SOFTMMU
# if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
# endif
    mem_index = *args;
    s_bits = opc & 3;

    /* Should generate something like the following:
     *  shr r8, addr_reg, #TARGET_PAGE_BITS
     *  and r0, r8, #(CPU_TLB_SIZE - 1)   @ Assumption: CPU_TLB_BITS <= 8
     *  add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
     */
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
                    8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
    tcg_out_dat_imm(s, COND_AL, ARITH_AND,
                    0, 8, CPU_TLB_SIZE - 1);
    tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                    0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
    /* In the
     *  ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
     * below, the offset is likely to exceed 12 bits if mem_index != 0 and
     * not exceed otherwise, so use an
     *  add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
     * before.
     */
    if (mem_index)
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
                        (mem_index << (TLB_SHIFT & 1)) |
                        ((16 - (TLB_SHIFT >> 1)) << 8));
    tcg_out_ld32_12(s, COND_AL, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write));
    /* Compare the TLB tag against the page-aligned guest address. */
    tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
                    0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    /* Check alignment. */
    if (s_bits)
        tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
                        0, addr_reg, (1 << s_bits) - 1);
# if TARGET_LONG_BITS == 64
    /* XXX: possibly we could use a block data load or writeback in
     * the first access. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addr_write)
                    + 4);
    tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
                    0, 1, addr_reg2, SHIFT_IMM_LSL(0));
# endif
    /* TLB hit: load the host-address addend into r1. */
    tcg_out_ld32_12(s, COND_EQ, 1, 0,
                    offsetof(CPUState, tlb_table[0][0].addend));

    /* Fast-path store, executed only when all compares above matched (EQ). */
    switch (opc) {
    case 0:
        tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 0 | 4:
        tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1:
        tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 1 | 4:
        tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 2:
    default:
        tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
        break;
    case 3:
        /* 64-bit: store low word with writeback, then high word at +4. */
        tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
        tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
        break;
    }

    /* On a TLB hit, branch over the slow path; offset patched below. */
    label_ptr = (void *) s->code_ptr;
    tcg_out_b(s, COND_EQ, 8);

    /* TODO: move this code to where the constants pool will be */
    /* Slow path: marshal arguments into r0.. and call the store helper. */
    if (addr_reg)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        0, 0, addr_reg, SHIFT_IMM_LSL(0));
# if TARGET_LONG_BITS == 32
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 1:
        /* Zero-extend the 16-bit value via shift left then shift right. */
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, 1, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 2:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
        break;
    case 3:
        if (data_reg != 1)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            1, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg2, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    }
# else
    /* 64-bit guest address: high half goes in r1, data shifts to r2/r3. */
    if (addr_reg2 != 1)
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        1, 0, addr_reg2, SHIFT_IMM_LSL(0));
    switch (opc) {
    case 0:
        tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 1:
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, data_reg, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        2, 0, 2, SHIFT_IMM_LSR(16));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 2:
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
        break;
    case 3:
        /* r0-r3 are all taken; pass mem_index as the fifth argument on
         * the stack (popped again after the call, below). */
        tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
        tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
        if (data_reg != 2)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            2, 0, data_reg, SHIFT_IMM_LSL(0));
        if (data_reg2 != 3)
            tcg_out_dat_reg(s, cond, ARITH_MOV,
                            3, 0, data_reg2, SHIFT_IMM_LSL(0));
        break;
    }
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
# endif

    tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
                    (tcg_target_long) s->code_ptr);
# if TARGET_LONG_BITS == 64
    /* Undo the stack push of mem_index made for the 64-bit store case. */
    if (opc == 3)
        tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
# endif

# ifdef SAVE_LR
    tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
# endif

    /* Patch the fast-path branch to skip the slow path just emitted
     * (-8 accounts for the ARM PC read-ahead; >>2 converts to words). */
    *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
#else /* !CONFIG_SOFTMMU */
    if (GUEST_BASE) {
        /* Add GUEST_BASE to addr_reg in 8-bit rotated-immediate chunks,
         * accumulating into scratch register r8. */
        uint32_t offset = GUEST_BASE;
        int i;
        int rot;

        while (offset) {
            i = ctz32(offset) & ~1;
            rot = ((32 - i) << 7) & 0xf00;

            tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
                            ((offset >> i) & 0xff) | rot);
            addr_reg = 8;
            offset &= ~(0xff << i);
        }
    }
    switch (opc) {
    case 0:
        tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 0 | 4:
        tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1:
        tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 1 | 4:
        tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 2:
    default:
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        break;
    case 3:
        /* TODO: use block store -
         * check that data_reg2 > data_reg or the other way */
        tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
        tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
        break;
    }
#endif
}
1309
811d4cf4
AZ
1310static uint8_t *tb_ret_addr;
1311
650bbb36 1312static inline void tcg_out_op(TCGContext *s, int opc,
811d4cf4
AZ
1313 const TCGArg *args, const int *const_args)
1314{
1315 int c;
1316
1317 switch (opc) {
1318 case INDEX_op_exit_tb:
1319#ifdef SAVE_LR
1320 if (args[0] >> 8)
1321 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1322 else
1323 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1324 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
1325 if (args[0] >> 8)
1326 tcg_out32(s, args[0]);
1327#else
fe33867b
AZ
1328 {
1329 uint8_t *ld_ptr = s->code_ptr;
1330 if (args[0] >> 8)
1331 tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
1332 else
1333 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
1334 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1335 if (args[0] >> 8) {
1336 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1337 tcg_out32(s, args[0]);
1338 }
1339 }
811d4cf4
AZ
1340#endif
1341 break;
1342 case INDEX_op_goto_tb:
1343 if (s->tb_jmp_offset) {
1344 /* Direct jump method */
fe33867b 1345#if defined(USE_DIRECT_JUMP)
811d4cf4
AZ
1346 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1347 tcg_out_b(s, COND_AL, 8);
1348#else
1349 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
1350 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1351 tcg_out32(s, 0);
1352#endif
1353 } else {
1354 /* Indirect jump method */
1355#if 1
1356 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1357 if (c > 0xfff || c < -0xfff) {
1358 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1359 (tcg_target_long) (s->tb_next + args[0]));
1360 tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1361 } else
1362 tcg_out_ld32_12(s, COND_AL, 15, 15, c);
1363#else
1364 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1365 tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1366 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1367#endif
1368 }
1369 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1370 break;
1371 case INDEX_op_call:
1372 if (const_args[0])
1373 tcg_out_call(s, COND_AL, args[0]);
1374 else
1375 tcg_out_callr(s, COND_AL, args[0]);
1376 break;
1377 case INDEX_op_jmp:
1378 if (const_args[0])
1379 tcg_out_goto(s, COND_AL, args[0]);
1380 else
1381 tcg_out_bx(s, COND_AL, args[0]);
1382 break;
1383 case INDEX_op_br:
1384 tcg_out_goto_label(s, COND_AL, args[0]);
1385 break;
1386
1387 case INDEX_op_ld8u_i32:
1388 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1389 break;
1390 case INDEX_op_ld8s_i32:
1391 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1392 break;
1393 case INDEX_op_ld16u_i32:
1394 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1395 break;
1396 case INDEX_op_ld16s_i32:
1397 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1398 break;
1399 case INDEX_op_ld_i32:
1400 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1401 break;
1402 case INDEX_op_st8_i32:
1403 tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
1404 break;
1405 case INDEX_op_st16_i32:
1406 tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
1407 break;
1408 case INDEX_op_st_i32:
1409 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1410 break;
1411
1412 case INDEX_op_mov_i32:
1413 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1414 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1415 break;
1416 case INDEX_op_movi_i32:
1417 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1418 break;
1419 case INDEX_op_add_i32:
1420 c = ARITH_ADD;
1421 goto gen_arith;
1422 case INDEX_op_sub_i32:
1423 c = ARITH_SUB;
1424 goto gen_arith;
1425 case INDEX_op_and_i32:
1426 c = ARITH_AND;
1427 goto gen_arith;
1428 case INDEX_op_or_i32:
1429 c = ARITH_ORR;
1430 goto gen_arith;
1431 case INDEX_op_xor_i32:
1432 c = ARITH_EOR;
1433 /* Fall through. */
1434 gen_arith:
94953e6d
LD
1435 if (const_args[2]) {
1436 int rot;
1437 rot = encode_imm(args[2]);
cb4e581f 1438 tcg_out_dat_imm(s, COND_AL, c,
94953e6d
LD
1439 args[0], args[1], rotl(args[2], rot) | (rot << 7));
1440 } else
cb4e581f
LD
1441 tcg_out_dat_reg(s, COND_AL, c,
1442 args[0], args[1], args[2], SHIFT_IMM_LSL(0));
811d4cf4
AZ
1443 break;
1444 case INDEX_op_add2_i32:
1445 tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1446 args[0], args[1], args[2], args[3],
1447 args[4], args[5], SHIFT_IMM_LSL(0));
1448 break;
1449 case INDEX_op_sub2_i32:
1450 tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1451 args[0], args[1], args[2], args[3],
1452 args[4], args[5], SHIFT_IMM_LSL(0));
1453 break;
650bbb36
AZ
1454 case INDEX_op_neg_i32:
1455 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1456 break;
f878d2d2
LD
1457 case INDEX_op_not_i32:
1458 tcg_out_dat_reg(s, COND_AL,
1459 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1460 break;
811d4cf4
AZ
1461 case INDEX_op_mul_i32:
1462 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1463 break;
1464 case INDEX_op_mulu2_i32:
1465 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1466 break;
1467 case INDEX_op_div2_i32:
1468 tcg_out_div_helper(s, COND_AL, args,
1469 tcg_helper_div_i64, tcg_helper_rem_i64,
1470 SHIFT_IMM_ASR(31));
1471 break;
1472 case INDEX_op_divu2_i32:
1473 tcg_out_div_helper(s, COND_AL, args,
1474 tcg_helper_divu_i64, tcg_helper_remu_i64,
1475 SHIFT_IMM_LSR(31));
1476 break;
1477 /* XXX: Perhaps args[2] & 0x1f is wrong */
1478 case INDEX_op_shl_i32:
1479 c = const_args[2] ?
1480 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1481 goto gen_shift32;
1482 case INDEX_op_shr_i32:
1483 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1484 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1485 goto gen_shift32;
1486 case INDEX_op_sar_i32:
1487 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1488 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1489 /* Fall through. */
1490 gen_shift32:
1491 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1492 break;
1493
1494 case INDEX_op_brcond_i32:
1495 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1496 args[0], args[1], SHIFT_IMM_LSL(0));
1497 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1498 break;
1499 case INDEX_op_brcond2_i32:
1500 /* The resulting conditions are:
1501 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1502 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1503 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1504 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1505 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1506 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1507 */
1508 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1509 args[1], args[3], SHIFT_IMM_LSL(0));
1510 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1511 args[0], args[2], SHIFT_IMM_LSL(0));
1512 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1513 break;
1514
1515 case INDEX_op_qemu_ld8u:
1516 tcg_out_qemu_ld(s, COND_AL, args, 0);
1517 break;
1518 case INDEX_op_qemu_ld8s:
1519 tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
1520 break;
1521 case INDEX_op_qemu_ld16u:
1522 tcg_out_qemu_ld(s, COND_AL, args, 1);
1523 break;
1524 case INDEX_op_qemu_ld16s:
1525 tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
1526 break;
1527 case INDEX_op_qemu_ld32u:
1528 tcg_out_qemu_ld(s, COND_AL, args, 2);
1529 break;
1530 case INDEX_op_qemu_ld64:
1531 tcg_out_qemu_ld(s, COND_AL, args, 3);
1532 break;
650bbb36 1533
811d4cf4
AZ
1534 case INDEX_op_qemu_st8:
1535 tcg_out_qemu_st(s, COND_AL, args, 0);
1536 break;
1537 case INDEX_op_qemu_st16:
1538 tcg_out_qemu_st(s, COND_AL, args, 1);
1539 break;
1540 case INDEX_op_qemu_st32:
1541 tcg_out_qemu_st(s, COND_AL, args, 2);
1542 break;
1543 case INDEX_op_qemu_st64:
1544 tcg_out_qemu_st(s, COND_AL, args, 3);
1545 break;
1546
1547 case INDEX_op_ext8s_i32:
1548 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1549 args[0], 0, args[1], SHIFT_IMM_LSL(24));
1550 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1551 args[0], 0, args[0], SHIFT_IMM_ASR(24));
1552 break;
1553 case INDEX_op_ext16s_i32:
1554 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1555 args[0], 0, args[1], SHIFT_IMM_LSL(16));
1556 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1557 args[0], 0, args[0], SHIFT_IMM_ASR(16));
1558 break;
1559
1560 default:
1561 tcg_abort();
1562 }
1563}
1564
/* Operand-constraint table registered with the TCG core.
 * "r"  = any allocatable register, "ri"/"rI" = register or (encodable)
 * immediate, "1"/"2" = alias of input operand 1/2.
 * NOTE(review): "x"/"X"/"d"/"D" are qemu_ld/st-specific constraint letters
 * defined elsewhere in this file — presumably restricted register classes
 * for the fixed slow-path call registers; verify against target_parse_constraint. */
static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    /* TODO: "r", "r", "ri" */
    { INDEX_op_add_i32, { "r", "r", "rI" } },
    { INDEX_op_sub_i32, { "r", "r", "rI" } },
    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
    /* div2/divu2 tie outputs to inputs 1 and 2 (helper calling convention). */
    { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
    { INDEX_op_and_i32, { "r", "r", "rI" } },
    { INDEX_op_or_i32, { "r", "r", "rI" } },
    { INDEX_op_xor_i32, { "r", "r", "rI" } },
    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "r" } },

    /* TODO: "r", "r", "r", "r", "ri", "ri" */
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
    { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
    { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
    { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },

    { INDEX_op_qemu_st8, { "x", "x", "X" } },
    { INDEX_op_qemu_st16, { "x", "x", "X" } },
    { INDEX_op_qemu_st32, { "x", "x", "X" } },
    { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { -1 },
};
1625
/* One-time initialization of the ARM TCG backend: declare the usable and
 * call-clobbered register sets, reserve fixed registers, and register the
 * operand-constraint table. */
void tcg_target_init(TCGContext *s)
{
    /* fail safe */
    if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
        tcg_abort();

    /* r0-r14 allocatable, minus r8 (reserved as scratch below). */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                    ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
    /* AAPCS call-clobbered set: r0-r3, r12 (ip), r14 (lr). */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                    ((2 << TCG_REG_R3) - 1) |
                    (1 << TCG_REG_R12) | (1 << TCG_REG_R14));

    tcg_regset_clear(s->reserved_regs);
#ifdef SAVE_LR
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);

    tcg_add_target_add_op_defs(arm_op_defs);
}
1647
/* TCG core hook: load a value of the given type from arg1+arg2 into reg arg.
 * Only 32-bit values exist on this target, so this is always a word load. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
}
1653
/* TCG core hook: store register arg to arg1+arg2 (always a 32-bit store). */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                int arg1, tcg_target_long arg2)
{
    tcg_out_st32(s, COND_AL, arg, arg1, arg2);
}
1659
2d69f359 1660static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
811d4cf4
AZ
1661{
1662 if (val > 0)
1663 if (val < 0x100)
1664 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
1665 else
1666 tcg_abort();
1667 else if (val < 0) {
1668 if (val > -0x100)
1669 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
1670 else
1671 tcg_abort();
1672 }
1673}
1674
/* TCG core hook: register-to-register move (mov ret, arg). */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
}
1679
/* TCG core hook: load an arbitrary 32-bit constant into a register. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                int ret, tcg_target_long arg)
{
    tcg_out_movi32(s, COND_AL, ret, arg);
}
1685
/* Emit the prologue/epilogue that brackets all generated code: save the
 * callee-saved registers this backend touches, jump to the translated
 * code (whose address arrives in r0), and record the return landing pad
 * (tb_ret_addr) that INDEX_op_exit_tb jumps back to. */
void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmdb sp!, { r9 - r11, lr } */
    /* (0x092d4e00: register-list bits 0x4e00 = r9, r10, r11, r14) */
    tcg_out32(s, (COND_AL << 28) | 0x092d4e00);

    tcg_out_bx(s, COND_AL, TCG_REG_R0);
    tb_ret_addr = s->code_ptr;

    /* ldmia sp!, { r9 - r11, pc } */
    /* (0x08bd8e00: register-list bits 0x8e00 = r9, r10, r11, pc —
     * restoring pc returns to the prologue's caller) */
    tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
}