qemu.git: tcg/arm/tcg-target.c
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25#ifndef NDEBUG
26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%r0",
28 "%r1",
29 "%r2",
30 "%r3",
31 "%r4",
32 "%r5",
33 "%r6",
34 "%r7",
35 "%r8",
36 "%r9",
37 "%r10",
38 "%r11",
39 "%r12",
40 "%r13",
41 "%r14",
42};
43#endif
44
45static const int tcg_target_reg_alloc_order[] = {
46 TCG_REG_R0,
47 TCG_REG_R1,
48 TCG_REG_R2,
49 TCG_REG_R3,
50 TCG_REG_R4,
51 TCG_REG_R5,
52 TCG_REG_R6,
53 TCG_REG_R7,
54 TCG_REG_R8,
55 TCG_REG_R9,
56 TCG_REG_R10,
57 TCG_REG_R11,
58 TCG_REG_R12,
59 TCG_REG_R13,
60 TCG_REG_R14,
61};
62
63static const int tcg_target_call_iarg_regs[4] = {
64 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
65};
66static const int tcg_target_call_oarg_regs[2] = {
67 TCG_REG_R0, TCG_REG_R1
68};
69
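/* Patch a relocation in already-emitted code.  R_ARM_ABS32 stores the
 * absolute value; R_ARM_PC24 rewrites only the low 24 bits of a branch
 * with the signed word offset relative to pc + 8, leaving the condition
 * and opcode bits untouched. */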
70static void patch_reloc(uint8_t *code_ptr, int type,
71 tcg_target_long value, tcg_target_long addend)
72{
73 switch (type) {
74 case R_ARM_ABS32:
75 *(uint32_t *) code_ptr = value;
76 break;
77
78 case R_ARM_CALL:
79 case R_ARM_JUMP24:
80 default:
81 tcg_abort();
82
83 case R_ARM_PC24:
84 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & 0xff000000) |
85 (((value - ((tcg_target_long) code_ptr + 8)) >> 2) & 0xffffff);
86 break;
87 }
88}
89
90/* maximum number of register used for input function arguments */
91static inline int tcg_target_get_call_iarg_regs_count(int flags)
92{
93 return 4;
94}
95
96/* parse target specific constraints */
97static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
98{
99 const char *ct_str;
100
101 ct_str = *pct_str;
102 switch (ct_str[0]) {
103 case 'I':
104 ct->ct |= TCG_CT_CONST_ARM;
105 break;
106
107 case 'r':
108#ifndef CONFIG_SOFTMMU
109 case 'd':
110 case 'D':
111 case 'x':
112 case 'X':
113#endif
114 ct->ct |= TCG_CT_REG;
115 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
116 break;
117
118#ifdef CONFIG_SOFTMMU
119 /* qemu_ld/st inputs (unless 'X', 'd' or 'D') */
120 case 'x':
121 ct->ct |= TCG_CT_REG;
122 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
123 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
124 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
125 break;
126
127 /* qemu_ld64 data_reg */
128 case 'd':
129 ct->ct |= TCG_CT_REG;
130 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
131 /* r1 is still needed to load data_reg2, so don't use it. */
132 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
133 break;
134
135 /* qemu_ld/st64 data_reg2 */
136 case 'D':
137 ct->ct |= TCG_CT_REG;
138 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
139 /* r0, r1 and optionally r2 will be overwritten by the address
140 * and the low word of data, so don't use these. */
141 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
142 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
143# if TARGET_LONG_BITS == 64
144 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
145# endif
146 break;
147
148# if TARGET_LONG_BITS == 64
149 /* qemu_ld/st addr_reg2 */
150 case 'X':
151 ct->ct |= TCG_CT_REG;
152 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
153 /* r0 will be overwritten by the low word of base, so don't use it. */
154 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
155 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
156 break;
157# endif
158#endif
159
160 case '1':
161 ct->ct |= TCG_CT_REG;
162 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
163 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
164 break;
165
166 case '2':
167 ct->ct |= TCG_CT_REG;
168 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
169 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
170 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
171 break;
172
173 default:
174 return -1;
175 }
176 ct_str++;
177 *pct_str = ct_str;
178
179 return 0;
180}
181
182
183static inline int check_fit_imm(uint32_t imm)
184{
185 /* XXX: use rotation */
186 return (imm & ~0xff) == 0;
187}
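/* Note: the check above only accepts immediates in the range 0..0xff.
 * A full operand2 check (sketch only, not used here) would also accept
 * any 8-bit value rotated right by an even amount, roughly:
 *
 *     for (i = 2; i < 32; i += 2)
 *         if ((((imm << i) | (imm >> (32 - i))) & ~0xffu) == 0)
 *             return 1;
 */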
188
189/* Test if a constant matches the constraint.
190 * TODO: define constraints for:
191 *
192 * ldr/str offset: between -0xfff and 0xfff
193 * ldrh/strh offset: between -0xff and 0xff
194 * mov operand2: values represented with x << (2 * y), x < 0x100
195 * add, sub, eor...: ditto
196 */
197static inline int tcg_target_const_match(tcg_target_long val,
198 const TCGArgConstraint *arg_ct)
199{
200 int ct;
201 ct = arg_ct->ct;
202 if (ct & TCG_CT_CONST)
203 return 1;
204 else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val))
205 return 1;
206 else
207 return 0;
208}
209
210enum arm_data_opc_e {
211 ARITH_AND = 0x0,
212 ARITH_EOR = 0x1,
213 ARITH_SUB = 0x2,
214 ARITH_RSB = 0x3,
215 ARITH_ADD = 0x4,
216 ARITH_ADC = 0x5,
217 ARITH_SBC = 0x6,
218 ARITH_RSC = 0x7,
219 ARITH_TST = 0x8,
220 ARITH_CMP = 0xa,
221 ARITH_CMN = 0xb,
222 ARITH_ORR = 0xc,
223 ARITH_MOV = 0xd,
224 ARITH_BIC = 0xe,
225 ARITH_MVN = 0xf,
226};
227
228#define TO_CPSR(opc) \
229 ((opc == ARITH_CMP || opc == ARITH_CMN || opc == ARITH_TST) << 20)
230
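/* Shifter operand (bits 0..11) of a data-processing instruction: the
 * *_IMM forms put a 5-bit shift amount in bits 7..11 with the shift
 * type in bits 5..6; the *_REG forms set bit 4 and name the
 * shift-amount register in bits 8..11. */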
231#define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
232#define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
233#define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
234#define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
235#define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
236#define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
237#define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
238#define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
239
240enum arm_cond_code_e {
241 COND_EQ = 0x0,
242 COND_NE = 0x1,
243 COND_CS = 0x2, /* Unsigned greater or equal */
244 COND_CC = 0x3, /* Unsigned less than */
245 COND_MI = 0x4, /* Negative */
246 COND_PL = 0x5, /* Zero or greater */
247 COND_VS = 0x6, /* Overflow */
248 COND_VC = 0x7, /* No overflow */
249 COND_HI = 0x8, /* Unsigned greater than */
250 COND_LS = 0x9, /* Unsigned less or equal */
251 COND_GE = 0xa,
252 COND_LT = 0xb,
253 COND_GT = 0xc,
254 COND_LE = 0xd,
255 COND_AL = 0xe,
256};
257
258static const uint8_t tcg_cond_to_arm_cond[10] = {
259 [TCG_COND_EQ] = COND_EQ,
260 [TCG_COND_NE] = COND_NE,
261 [TCG_COND_LT] = COND_LT,
262 [TCG_COND_GE] = COND_GE,
263 [TCG_COND_LE] = COND_LE,
264 [TCG_COND_GT] = COND_GT,
265 /* unsigned */
266 [TCG_COND_LTU] = COND_CC,
267 [TCG_COND_GEU] = COND_CS,
268 [TCG_COND_LEU] = COND_LS,
269 [TCG_COND_GTU] = COND_HI,
270};
271
272static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
273{
274 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
275}
276
277static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
278{
279 tcg_out32(s, (cond << 28) | 0x0a000000 |
280 (((offset - 8) >> 2) & 0x00ffffff));
281}
282
283static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
284{
285#ifdef WORDS_BIGENDIAN
286 tcg_out8(s, (cond << 4) | 0x0a);
287 s->code_ptr += 3;
288#else
289 s->code_ptr += 3;
290 tcg_out8(s, (cond << 4) | 0x0a);
291#endif
292}
293
294static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
295{
296 tcg_out32(s, (cond << 28) | 0x0b000000 |
297 (((offset - 8) >> 2) & 0x00ffffff));
298}
299
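/* Emit a data-processing instruction with a register (optionally
 * shifted) second operand.  TO_CPSR() sets the S bit for CMP/CMN/TST,
 * which only update the condition flags. */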
300static inline void tcg_out_dat_reg(TCGContext *s,
301 int cond, int opc, int rd, int rn, int rm, int shift)
302{
303 tcg_out32(s, (cond << 28) | (0 << 25) | (opc << 21) | TO_CPSR(opc) |
304 (rn << 16) | (rd << 12) | shift | rm);
305}
306
307static inline void tcg_out_dat_reg2(TCGContext *s,
308 int cond, int opc0, int opc1, int rd0, int rd1,
309 int rn0, int rn1, int rm0, int rm1, int shift)
310{
311 if (rd0 == rn1 || rd0 == rm1) {
312 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
313 (rn0 << 16) | (8 << 12) | shift | rm0);
314 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
315 (rn1 << 16) | (rd1 << 12) | shift | rm1);
316 tcg_out_dat_reg(s, cond, ARITH_MOV,
317 rd0, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
318 } else {
319 tcg_out32(s, (cond << 28) | (0 << 25) | (opc0 << 21) | (1 << 20) |
320 (rn0 << 16) | (rd0 << 12) | shift | rm0);
321 tcg_out32(s, (cond << 28) | (0 << 25) | (opc1 << 21) |
322 (rn1 << 16) | (rd1 << 12) | shift | rm1);
323 }
324}
325
326static inline void tcg_out_dat_imm(TCGContext *s,
327 int cond, int opc, int rd, int rn, int im)
328{
329 tcg_out32(s, (cond << 28) | (1 << 25) | (opc << 21) | TO_CPSR(opc) |
330 (rn << 16) | (rd << 12) | im);
331}
332
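/* Load a 32-bit constant into rd.  Small negative values use MVN,
 * values close to the current pc use a pc-relative ADD/SUB, ARMv7-A
 * builds use MOVW/MOVT, and everything else is built up with a MOV
 * followed by up to three ORRs of one rotated byte each. */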
333static inline void tcg_out_movi32(TCGContext *s,
334 int cond, int rd, int32_t arg)
335{
336 int offset = (uint32_t) arg - ((uint32_t) s->code_ptr + 8);
337
338 /* TODO: This is very suboptimal, we can easily have a constant
339 * pool somewhere after all the instructions. */
340
341 if (arg < 0 && arg > -0x100)
342 return tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, (~arg) & 0xff);
343
344 if (offset < 0x100 && offset > -0x100)
345 return offset >= 0 ?
346 tcg_out_dat_imm(s, cond, ARITH_ADD, rd, 15, offset) :
347 tcg_out_dat_imm(s, cond, ARITH_SUB, rd, 15, -offset);
348
349#ifdef __ARM_ARCH_7A__
350 /* use movw/movt */
351 /* movw */
352 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
353 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
354 if (arg & 0xffff0000)
355 /* movt */
356 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
357 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
358#else
359 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, arg & 0xff);
360 if (arg & 0x0000ff00)
361 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
362 ((arg >> 8) & 0xff) | 0xc00);
363 if (arg & 0x00ff0000)
364 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
365 ((arg >> 16) & 0xff) | 0x800);
366 if (arg & 0xff000000)
367 tcg_out_dat_imm(s, cond, ARITH_ORR, rd, rd,
368 ((arg >> 24) & 0xff) | 0x400);
369#endif
370}
371
372static inline void tcg_out_mul32(TCGContext *s,
373 int cond, int rd, int rs, int rm)
374{
375 if (rd != rm)
376 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
377 (rs << 8) | 0x90 | rm);
378 else if (rd != rs)
379 tcg_out32(s, (cond << 28) | (rd << 16) | (0 << 12) |
380 (rm << 8) | 0x90 | rs);
381 else {
382 tcg_out32(s, (cond << 28) | ( 8 << 16) | (0 << 12) |
383 (rs << 8) | 0x90 | rm);
384 tcg_out_dat_reg(s, cond, ARITH_MOV,
385 rd, 0, 8, SHIFT_IMM_LSL(0));
386 }
387}
388
389static inline void tcg_out_umull32(TCGContext *s,
390 int cond, int rd0, int rd1, int rs, int rm)
391{
392 if (rd0 != rm && rd1 != rm)
393 tcg_out32(s, (cond << 28) | 0x800090 |
394 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
395 else if (rd0 != rs && rd1 != rs)
396 tcg_out32(s, (cond << 28) | 0x800090 |
397 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
398 else {
399 tcg_out_dat_reg(s, cond, ARITH_MOV,
400 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
401 tcg_out32(s, (cond << 28) | 0x800098 |
402 (rd1 << 16) | (rd0 << 12) | (rs << 8));
403 }
404}
405
406static inline void tcg_out_smull32(TCGContext *s,
407 int cond, int rd0, int rd1, int rs, int rm)
408{
409 if (rd0 != rm && rd1 != rm)
410 tcg_out32(s, (cond << 28) | 0xc00090 |
411 (rd1 << 16) | (rd0 << 12) | (rs << 8) | rm);
412 else if (rd0 != rs && rd1 != rs)
413 tcg_out32(s, (cond << 28) | 0xc00090 |
414 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rs);
415 else {
416 tcg_out_dat_reg(s, cond, ARITH_MOV,
417 TCG_REG_R8, 0, rm, SHIFT_IMM_LSL(0));
418 tcg_out32(s, (cond << 28) | 0xc00098 |
419 (rd1 << 16) | (rd0 << 12) | (rs << 8));
420 }
421}
422
423static inline void tcg_out_ld32_12(TCGContext *s, int cond,
424 int rd, int rn, tcg_target_long im)
425{
426 if (im >= 0)
427 tcg_out32(s, (cond << 28) | 0x05900000 |
428 (rn << 16) | (rd << 12) | (im & 0xfff));
429 else
430 tcg_out32(s, (cond << 28) | 0x05100000 |
431 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
432}
433
434static inline void tcg_out_st32_12(TCGContext *s, int cond,
435 int rd, int rn, tcg_target_long im)
436{
437 if (im >= 0)
438 tcg_out32(s, (cond << 28) | 0x05800000 |
439 (rn << 16) | (rd << 12) | (im & 0xfff));
440 else
441 tcg_out32(s, (cond << 28) | 0x05000000 |
442 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
443}
444
445static inline void tcg_out_ld32_r(TCGContext *s, int cond,
446 int rd, int rn, int rm)
447{
448 tcg_out32(s, (cond << 28) | 0x07900000 |
449 (rn << 16) | (rd << 12) | rm);
450}
451
452static inline void tcg_out_st32_r(TCGContext *s, int cond,
453 int rd, int rn, int rm)
454{
455 tcg_out32(s, (cond << 28) | 0x07800000 |
456 (rn << 16) | (rd << 12) | rm);
457}
458
459/* Register pre-increment with base writeback. */
460static inline void tcg_out_ld32_rwb(TCGContext *s, int cond,
461 int rd, int rn, int rm)
462{
463 tcg_out32(s, (cond << 28) | 0x07b00000 |
464 (rn << 16) | (rd << 12) | rm);
465}
466
467static inline void tcg_out_st32_rwb(TCGContext *s, int cond,
468 int rd, int rn, int rm)
469{
470 tcg_out32(s, (cond << 28) | 0x07a00000 |
471 (rn << 16) | (rd << 12) | rm);
472}
473
474static inline void tcg_out_ld16u_8(TCGContext *s, int cond,
475 int rd, int rn, tcg_target_long im)
476{
477 if (im >= 0)
478 tcg_out32(s, (cond << 28) | 0x01d000b0 |
479 (rn << 16) | (rd << 12) |
480 ((im & 0xf0) << 4) | (im & 0xf));
481 else
482 tcg_out32(s, (cond << 28) | 0x015000b0 |
483 (rn << 16) | (rd << 12) |
484 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
485}
486
487static inline void tcg_out_st16u_8(TCGContext *s, int cond,
488 int rd, int rn, tcg_target_long im)
489{
490 if (im >= 0)
491 tcg_out32(s, (cond << 28) | 0x01c000b0 |
492 (rn << 16) | (rd << 12) |
493 ((im & 0xf0) << 4) | (im & 0xf));
494 else
495 tcg_out32(s, (cond << 28) | 0x014000b0 |
496 (rn << 16) | (rd << 12) |
497 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
498}
499
500static inline void tcg_out_ld16u_r(TCGContext *s, int cond,
501 int rd, int rn, int rm)
502{
503 tcg_out32(s, (cond << 28) | 0x019000b0 |
504 (rn << 16) | (rd << 12) | rm);
505}
506
507static inline void tcg_out_st16u_r(TCGContext *s, int cond,
508 int rd, int rn, int rm)
509{
510 tcg_out32(s, (cond << 28) | 0x018000b0 |
511 (rn << 16) | (rd << 12) | rm);
512}
513
514static inline void tcg_out_ld16s_8(TCGContext *s, int cond,
515 int rd, int rn, tcg_target_long im)
516{
517 if (im >= 0)
518 tcg_out32(s, (cond << 28) | 0x01d000f0 |
519 (rn << 16) | (rd << 12) |
520 ((im & 0xf0) << 4) | (im & 0xf));
521 else
522 tcg_out32(s, (cond << 28) | 0x015000f0 |
523 (rn << 16) | (rd << 12) |
524 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
525}
526
527static inline void tcg_out_st16s_8(TCGContext *s, int cond,
528 int rd, int rn, tcg_target_long im)
529{
530 if (im >= 0)
531 tcg_out32(s, (cond << 28) | 0x01c000f0 |
532 (rn << 16) | (rd << 12) |
533 ((im & 0xf0) << 4) | (im & 0xf));
534 else
535 tcg_out32(s, (cond << 28) | 0x014000f0 |
536 (rn << 16) | (rd << 12) |
537 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
538}
539
540static inline void tcg_out_ld16s_r(TCGContext *s, int cond,
541 int rd, int rn, int rm)
542{
543 tcg_out32(s, (cond << 28) | 0x019000f0 |
544 (rn << 16) | (rd << 12) | rm);
545}
546
547static inline void tcg_out_st16s_r(TCGContext *s, int cond,
548 int rd, int rn, int rm)
549{
550 tcg_out32(s, (cond << 28) | 0x018000f0 |
551 (rn << 16) | (rd << 12) | rm);
552}
553
554static inline void tcg_out_ld8_12(TCGContext *s, int cond,
555 int rd, int rn, tcg_target_long im)
556{
557 if (im >= 0)
558 tcg_out32(s, (cond << 28) | 0x05d00000 |
559 (rn << 16) | (rd << 12) | (im & 0xfff));
560 else
561 tcg_out32(s, (cond << 28) | 0x05500000 |
562 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
563}
564
565static inline void tcg_out_st8_12(TCGContext *s, int cond,
566 int rd, int rn, tcg_target_long im)
567{
568 if (im >= 0)
569 tcg_out32(s, (cond << 28) | 0x05c00000 |
570 (rn << 16) | (rd << 12) | (im & 0xfff));
571 else
572 tcg_out32(s, (cond << 28) | 0x05400000 |
573 (rn << 16) | (rd << 12) | ((-im) & 0xfff));
574}
575
576static inline void tcg_out_ld8_r(TCGContext *s, int cond,
577 int rd, int rn, int rm)
578{
579 tcg_out32(s, (cond << 28) | 0x07d00000 |
580 (rn << 16) | (rd << 12) | rm);
581}
582
583static inline void tcg_out_st8_r(TCGContext *s, int cond,
584 int rd, int rn, int rm)
585{
586 tcg_out32(s, (cond << 28) | 0x07c00000 |
587 (rn << 16) | (rd << 12) | rm);
588}
589
590static inline void tcg_out_ld8s_8(TCGContext *s, int cond,
591 int rd, int rn, tcg_target_long im)
592{
593 if (im >= 0)
594 tcg_out32(s, (cond << 28) | 0x01d000d0 |
595 (rn << 16) | (rd << 12) |
596 ((im & 0xf0) << 4) | (im & 0xf));
597 else
598 tcg_out32(s, (cond << 28) | 0x015000d0 |
599 (rn << 16) | (rd << 12) |
600 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
601}
602
603static inline void tcg_out_st8s_8(TCGContext *s, int cond,
604 int rd, int rn, tcg_target_long im)
605{
606 if (im >= 0)
607 tcg_out32(s, (cond << 28) | 0x01c000d0 |
608 (rn << 16) | (rd << 12) |
609 ((im & 0xf0) << 4) | (im & 0xf));
610 else
611 tcg_out32(s, (cond << 28) | 0x014000d0 |
612 (rn << 16) | (rd << 12) |
613 (((-im) & 0xf0) << 4) | ((-im) & 0xf));
614}
615
616static inline void tcg_out_ld8s_r(TCGContext *s, int cond,
617 int rd, int rn, int rm)
618{
619 tcg_out32(s, (cond << 28) | 0x019000d0 |
620 (rn << 16) | (rd << 12) | rm);
621}
622
623static inline void tcg_out_st8s_r(TCGContext *s, int cond,
624 int rd, int rn, int rm)
625{
626 tcg_out32(s, (cond << 28) | 0x018000d0 |
627 (rn << 16) | (rd << 12) | rm);
628}
629
630static inline void tcg_out_ld32u(TCGContext *s, int cond,
631 int rd, int rn, int32_t offset)
632{
633 if (offset > 0xfff || offset < -0xfff) {
634 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
635 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_R8);
636 } else
637 tcg_out_ld32_12(s, cond, rd, rn, offset);
638}
639
640static inline void tcg_out_st32(TCGContext *s, int cond,
641 int rd, int rn, int32_t offset)
642{
643 if (offset > 0xfff || offset < -0xfff) {
644 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
645 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_R8);
646 } else
647 tcg_out_st32_12(s, cond, rd, rn, offset);
648}
649
650static inline void tcg_out_ld16u(TCGContext *s, int cond,
651 int rd, int rn, int32_t offset)
652{
653 if (offset > 0xff || offset < -0xff) {
654 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
655 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_R8);
656 } else
657 tcg_out_ld16u_8(s, cond, rd, rn, offset);
658}
659
660static inline void tcg_out_ld16s(TCGContext *s, int cond,
661 int rd, int rn, int32_t offset)
662{
663 if (offset > 0xff || offset < -0xff) {
664 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
665 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_R8);
666 } else
667 tcg_out_ld16s_8(s, cond, rd, rn, offset);
668}
669
670static inline void tcg_out_st16u(TCGContext *s, int cond,
671 int rd, int rn, int32_t offset)
672{
673 if (offset > 0xff || offset < -0xff) {
674 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
675 tcg_out_st16u_r(s, cond, rd, rn, TCG_REG_R8);
676 } else
677 tcg_out_st16u_8(s, cond, rd, rn, offset);
678}
679
680static inline void tcg_out_ld8u(TCGContext *s, int cond,
681 int rd, int rn, int32_t offset)
682{
683 if (offset > 0xfff || offset < -0xfff) {
684 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
685 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_R8);
686 } else
687 tcg_out_ld8_12(s, cond, rd, rn, offset);
688}
689
690static inline void tcg_out_ld8s(TCGContext *s, int cond,
691 int rd, int rn, int32_t offset)
692{
693 if (offset > 0xff || offset < -0xff) {
694 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
695 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_R8);
696 } else
697 tcg_out_ld8s_8(s, cond, rd, rn, offset);
698}
699
700static inline void tcg_out_st8u(TCGContext *s, int cond,
701 int rd, int rn, int32_t offset)
702{
703 if (offset > 0xfff || offset < -0xfff) {
704 tcg_out_movi32(s, cond, TCG_REG_R8, offset);
705 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_R8);
706 } else
707 tcg_out_st8_12(s, cond, rd, rn, offset);
708}
709
710static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
711{
712 int32_t val;
713
714 val = addr - (tcg_target_long) s->code_ptr;
715 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
716 tcg_out_b(s, cond, val);
717 else {
718#if 1
719 tcg_abort();
720#else
721 if (cond == COND_AL) {
722 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
723 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
724 } else {
725 tcg_out_movi32(s, cond, TCG_REG_R8, val - 8);
726 tcg_out_dat_reg(s, cond, ARITH_ADD,
727 15, 15, TCG_REG_R8, SHIFT_IMM_LSL(0));
728 }
729#endif
730 }
731}
732
733static inline void tcg_out_call(TCGContext *s, int cond, uint32_t addr)
734{
735 int32_t val;
736
737#ifdef SAVE_LR
738 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
739#endif
740
741 val = addr - (tcg_target_long) s->code_ptr;
742 if (val < 0x01fffffd && val > -0x01fffffd)
743 tcg_out_bl(s, cond, val);
744 else {
745#if 1
746 tcg_abort();
747#else
748 if (cond == COND_AL) {
749 tcg_out_dat_imm(s, cond, ARITH_ADD, 14, 15, 4);
750 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
751 tcg_out32(s, addr); /* XXX: This is l->u.value, can we use it? */
752 } else {
753 tcg_out_movi32(s, cond, TCG_REG_R9, addr);
754 tcg_out_dat_imm(s, cond, ARITH_MOV, 14, 0, 15);
755 tcg_out_bx(s, cond, TCG_REG_R9);
756 }
757#endif
758 }
759
760#ifdef SAVE_LR
761 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
762#endif
763}
764
765static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
766{
767#ifdef SAVE_LR
768 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R8, 0, 14, SHIFT_IMM_LSL(0));
769#endif
770 /* TODO: on ARMv5 and ARMv6 replace with tcg_out_blx(s, cond, arg); */
771 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 15, SHIFT_IMM_LSL(0));
772 tcg_out_bx(s, cond, arg);
773#ifdef SAVE_LR
774 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, TCG_REG_R8, SHIFT_IMM_LSL(0));
775#endif
776}
777
778static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
779{
780 TCGLabel *l = &s->labels[label_index];
781
782 if (l->has_value)
783 tcg_out_goto(s, cond, l->u.value);
784 else if (cond == COND_AL) {
785 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
786 tcg_out_reloc(s, s->code_ptr, R_ARM_ABS32, label_index, 31337);
787 s->code_ptr += 4;
788 } else {
789 /* Probably this should be preferred even for COND_AL... */
790 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
791 tcg_out_b_noaddr(s, cond);
792 }
793}
794
795static void tcg_out_div_helper(TCGContext *s, int cond, const TCGArg *args,
796 void *helper_div, void *helper_rem, int shift)
797{
798 int div_reg = args[0];
799 int rem_reg = args[1];
800
801 /* stmdb sp!, { r0 - r3, ip, lr } */
802 /* (Note that we need an even number of registers as per EABI) */
803 tcg_out32(s, (cond << 28) | 0x092d500f);
804
805 tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
806 tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
807 tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
808 tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
809
810 tcg_out_call(s, cond, (uint32_t) helper_div);
811 tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 0, SHIFT_IMM_LSL(0));
812
813 /* ldmia sp, { r0 - r3, ip, lr } */
814 tcg_out32(s, (cond << 28) | 0x089d500f);
815
816 tcg_out_dat_reg(s, cond, ARITH_MOV, 0, 0, args[2], SHIFT_IMM_LSL(0));
817 tcg_out_dat_reg(s, cond, ARITH_MOV, 1, 0, args[3], SHIFT_IMM_LSL(0));
818 tcg_out_dat_reg(s, cond, ARITH_MOV, 2, 0, args[4], SHIFT_IMM_LSL(0));
819 tcg_out_dat_reg(s, cond, ARITH_MOV, 3, 0, 2, shift);
820
821 tcg_out_call(s, cond, (uint32_t) helper_rem);
822
823 tcg_out_dat_reg(s, cond, ARITH_MOV, rem_reg, 0, 0, SHIFT_IMM_LSL(0));
824 tcg_out_dat_reg(s, cond, ARITH_MOV, div_reg, 0, 8, SHIFT_IMM_LSL(0));
825
826 /* ldr r0, [sp], #4 */
827 if (rem_reg != 0 && div_reg != 0)
828 tcg_out32(s, (cond << 28) | 0x04bd0004);
829 /* ldr r1, [sp], #4 */
830 if (rem_reg != 1 && div_reg != 1)
831 tcg_out32(s, (cond << 28) | 0x04bd1004);
832 /* ldr r2, [sp], #4 */
833 if (rem_reg != 2 && div_reg != 2)
834 tcg_out32(s, (cond << 28) | 0x04bd2004);
835 /* ldr r3, [sp], #4 */
836 if (rem_reg != 3 && div_reg != 3)
837 tcg_out32(s, (cond << 28) | 0x04bd3004);
838 /* ldr ip, [sp], #4 */
839 if (rem_reg != 12 && div_reg != 12)
840 tcg_out32(s, (cond << 28) | 0x04bdc004);
841 /* ldr lr, [sp], #4 */
842 if (rem_reg != 14 && div_reg != 14)
843 tcg_out32(s, (cond << 28) | 0x04bde004);
844}
845
846#ifdef CONFIG_SOFTMMU
847
848#include "../../softmmu_defs.h"
849
850static void *qemu_ld_helpers[4] = {
851 __ldb_mmu,
852 __ldw_mmu,
853 __ldl_mmu,
854 __ldq_mmu,
855};
856
857static void *qemu_st_helpers[4] = {
858 __stb_mmu,
859 __stw_mmu,
860 __stl_mmu,
861 __stq_mmu,
862};
863#endif
864
865#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
866
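/* Generate a guest load.  Under CONFIG_SOFTMMU this emits an inline TLB
 * lookup (see the sketch in the comment below); the access itself and a
 * branch over the slow path are predicated on the compare succeeding,
 * so on a miss execution falls through into a call of the __ld*_mmu
 * helper.  label_ptr records the branch so it can be patched once the
 * slow path has been emitted.  Without softmmu the access is done
 * directly, offset by GUEST_BASE if set. */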
867static inline void tcg_out_qemu_ld(TCGContext *s, int cond,
868 const TCGArg *args, int opc)
869{
870 int addr_reg, data_reg, data_reg2;
871#ifdef CONFIG_SOFTMMU
872 int mem_index, s_bits;
873# if TARGET_LONG_BITS == 64
874 int addr_reg2;
875# endif
876 uint32_t *label_ptr;
877#endif
878
879 data_reg = *args++;
880 if (opc == 3)
881 data_reg2 = *args++;
882 else
883 data_reg2 = 0; /* suppress warning */
884 addr_reg = *args++;
885#ifdef CONFIG_SOFTMMU
886# if TARGET_LONG_BITS == 64
887 addr_reg2 = *args++;
888# endif
889 mem_index = *args;
890 s_bits = opc & 3;
891
892 /* Should generate something like the following:
893 * shr r8, addr_reg, #TARGET_PAGE_BITS
894 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
895 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
896 */
897# if CPU_TLB_BITS > 8
898# error
899# endif
900 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
901 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
902 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
903 0, 8, CPU_TLB_SIZE - 1);
904 tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
905 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
906 /* In the
907 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_read))]
908 * below, the offset is likely to exceed 12 bits if mem_index != 0 and
909 * not exceed otherwise, so use an
910 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
911 * before.
912 */
913 if (mem_index)
914 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
915 (mem_index << (TLB_SHIFT & 1)) |
916 ((16 - (TLB_SHIFT >> 1)) << 8));
917 tcg_out_ld32_12(s, COND_AL, 1, 0,
918 offsetof(CPUState, tlb_table[0][0].addr_read));
919 tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
920 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
921 /* Check alignment. */
922 if (s_bits)
923 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
924 0, addr_reg, (1 << s_bits) - 1);
925# if TARGET_LONG_BITS == 64
926 /* XXX: possibly we could use a block data load or writeback in
927 * the first access. */
928 tcg_out_ld32_12(s, COND_EQ, 1, 0,
929 offsetof(CPUState, tlb_table[0][0].addr_read) + 4);
930 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
931 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
932# endif
933 tcg_out_ld32_12(s, COND_EQ, 1, 0,
934 offsetof(CPUState, tlb_table[0][0].addend));
935
936 switch (opc) {
937 case 0:
938 tcg_out_ld8_r(s, COND_EQ, data_reg, addr_reg, 1);
939 break;
940 case 0 | 4:
941 tcg_out_ld8s_r(s, COND_EQ, data_reg, addr_reg, 1);
942 break;
943 case 1:
944 tcg_out_ld16u_r(s, COND_EQ, data_reg, addr_reg, 1);
945 break;
946 case 1 | 4:
947 tcg_out_ld16s_r(s, COND_EQ, data_reg, addr_reg, 1);
948 break;
949 case 2:
950 default:
951 tcg_out_ld32_r(s, COND_EQ, data_reg, addr_reg, 1);
952 break;
953 case 3:
954 tcg_out_ld32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
955 tcg_out_ld32_12(s, COND_EQ, data_reg2, 1, 4);
956 break;
957 }
958
959 label_ptr = (void *) s->code_ptr;
960 tcg_out_b(s, COND_EQ, 8);
961
962# ifdef SAVE_LR
963 tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
964# endif
965
966 /* TODO: move this code to where the constant pool will be */
967 if (addr_reg)
968 tcg_out_dat_reg(s, cond, ARITH_MOV,
969 0, 0, addr_reg, SHIFT_IMM_LSL(0));
970# if TARGET_LONG_BITS == 32
971 tcg_out_dat_imm(s, cond, ARITH_MOV, 1, 0, mem_index);
972# else
973 if (addr_reg2 != 1)
974 tcg_out_dat_reg(s, cond, ARITH_MOV,
975 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
976 tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
977# endif
978 tcg_out_bl(s, cond, (tcg_target_long) qemu_ld_helpers[s_bits] -
979 (tcg_target_long) s->code_ptr);
980
981 switch (opc) {
982 case 0 | 4:
983 tcg_out_dat_reg(s, cond, ARITH_MOV,
984 0, 0, 0, SHIFT_IMM_LSL(24));
985 tcg_out_dat_reg(s, cond, ARITH_MOV,
986 data_reg, 0, 0, SHIFT_IMM_ASR(24));
987 break;
988 case 1 | 4:
989 tcg_out_dat_reg(s, cond, ARITH_MOV,
990 0, 0, 0, SHIFT_IMM_LSL(16));
991 tcg_out_dat_reg(s, cond, ARITH_MOV,
992 data_reg, 0, 0, SHIFT_IMM_ASR(16));
993 break;
994 case 0:
995 case 1:
996 case 2:
997 default:
998 if (data_reg)
999 tcg_out_dat_reg(s, cond, ARITH_MOV,
1000 data_reg, 0, 0, SHIFT_IMM_LSL(0));
1001 break;
1002 case 3:
1003 if (data_reg != 0)
1004 tcg_out_dat_reg(s, cond, ARITH_MOV,
1005 data_reg, 0, 0, SHIFT_IMM_LSL(0));
1006 if (data_reg2 != 1)
1007 tcg_out_dat_reg(s, cond, ARITH_MOV,
1008 data_reg2, 0, 1, SHIFT_IMM_LSL(0));
1009 break;
1010 }
1011
1012# ifdef SAVE_LR
1013 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
1014# endif
1015
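/* Retarget the COND_EQ branch emitted after the TLB compare so that the
 * fast path jumps here, over the helper call. */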
1016 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1017#else /* !CONFIG_SOFTMMU */
1018 if (GUEST_BASE) {
1019 uint32_t offset = GUEST_BASE;
1020 int i;
1021 int rot;
1022
1023 while (offset) {
1024 i = ctz32(offset) & ~1;
1025 rot = ((32 - i) << 7) & 0xf00;
1026
1027 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
1028 ((offset >> i) & 0xff) | rot);
1029 addr_reg = 8;
1030 offset &= ~(0xff << i);
1031 }
1032 }
1033 switch (opc) {
1034 case 0:
1035 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1036 break;
1037 case 0 | 4:
1038 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1039 break;
1040 case 1:
1041 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1042 break;
1043 case 1 | 4:
1044 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1045 break;
1046 case 2:
1047 default:
1048 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1049 break;
1050 case 3:
1051 /* TODO: use block load -
1052 * check that data_reg2 > data_reg or the other way */
1053 if (data_reg == addr_reg) {
1054 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1055 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1056 } else {
1057 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1058 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, 4);
1059 }
1060 break;
1061 }
1062#endif
1063}
1064
1065static inline void tcg_out_qemu_st(TCGContext *s, int cond,
1066 const TCGArg *args, int opc)
1067{
1068 int addr_reg, data_reg, data_reg2;
1069#ifdef CONFIG_SOFTMMU
1070 int mem_index, s_bits;
1071# if TARGET_LONG_BITS == 64
1072 int addr_reg2;
1073# endif
1074 uint32_t *label_ptr;
1075#endif
1076
1077 data_reg = *args++;
1078 if (opc == 3)
1079 data_reg2 = *args++;
1080 else
1081 data_reg2 = 0; /* suppress warning */
1082 addr_reg = *args++;
1083#ifdef CONFIG_SOFTMMU
1084# if TARGET_LONG_BITS == 64
1085 addr_reg2 = *args++;
1086# endif
1087 mem_index = *args;
1088 s_bits = opc & 3;
1089
1090 /* Should generate something like the following:
1091 * shr r8, addr_reg, #TARGET_PAGE_BITS
1092 * and r0, r8, #(CPU_TLB_SIZE - 1) @ Assumption: CPU_TLB_BITS <= 8
1093 * add r0, env, r0 lsl #CPU_TLB_ENTRY_BITS
1094 */
1095 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1096 8, 0, addr_reg, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1097 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1098 0, 8, CPU_TLB_SIZE - 1);
1099 tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
1100 0, TCG_AREG0, 0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1101 /* In the
1102 * ldr r1 [r0, #(offsetof(CPUState, tlb_table[mem_index][0].addr_write))]
1103 * below, the offset is likely to exceed 12 bits if mem_index != 0 and
1104 * not exceed otherwise, so use an
1105 * add r0, r0, #(mem_index * sizeof *CPUState.tlb_table)
1106 * before.
1107 */
1108 if (mem_index)
1109 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 0, 0,
1110 (mem_index << (TLB_SHIFT & 1)) |
1111 ((16 - (TLB_SHIFT >> 1)) << 8));
1112 tcg_out_ld32_12(s, COND_AL, 1, 0,
1113 offsetof(CPUState, tlb_table[0][0].addr_write));
1114 tcg_out_dat_reg(s, COND_AL, ARITH_CMP,
1115 0, 1, 8, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1116 /* Check alignment. */
1117 if (s_bits)
1118 tcg_out_dat_imm(s, COND_EQ, ARITH_TST,
1119 0, addr_reg, (1 << s_bits) - 1);
1120# if TARGET_LONG_BITS == 64
1121 /* XXX: possibly we could use a block data load or writeback in
1122 * the first access. */
1123 tcg_out_ld32_12(s, COND_EQ, 1, 0,
1124 offsetof(CPUState, tlb_table[0][0].addr_write)
1125 + 4);
1126 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP,
1127 0, 1, addr_reg2, SHIFT_IMM_LSL(0));
1128# endif
1129 tcg_out_ld32_12(s, COND_EQ, 1, 0,
1130 offsetof(CPUState, tlb_table[0][0].addend));
1131
1132 switch (opc) {
1133 case 0:
1134 tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, 1);
1135 break;
1136 case 0 | 4:
1137 tcg_out_st8s_r(s, COND_EQ, data_reg, addr_reg, 1);
1138 break;
1139 case 1:
1140 tcg_out_st16u_r(s, COND_EQ, data_reg, addr_reg, 1);
1141 break;
1142 case 1 | 4:
1143 tcg_out_st16s_r(s, COND_EQ, data_reg, addr_reg, 1);
1144 break;
1145 case 2:
1146 default:
1147 tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, 1);
1148 break;
1149 case 3:
1150 tcg_out_st32_rwb(s, COND_EQ, data_reg, 1, addr_reg);
1151 tcg_out_st32_12(s, COND_EQ, data_reg2, 1, 4);
1152 break;
1153 }
1154
1155 label_ptr = (void *) s->code_ptr;
1156 tcg_out_b(s, COND_EQ, 8);
1157
1158 /* TODO: move this code to where the constant pool will be */
1159 if (addr_reg)
1160 tcg_out_dat_reg(s, cond, ARITH_MOV,
1161 0, 0, addr_reg, SHIFT_IMM_LSL(0));
1162# if TARGET_LONG_BITS == 32
1163 switch (opc) {
1164 case 0:
1165 tcg_out_dat_imm(s, cond, ARITH_AND, 1, data_reg, 0xff);
1166 tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1167 break;
1168 case 1:
1169 tcg_out_dat_reg(s, cond, ARITH_MOV,
1170 1, 0, data_reg, SHIFT_IMM_LSL(16));
1171 tcg_out_dat_reg(s, cond, ARITH_MOV,
1172 1, 0, 1, SHIFT_IMM_LSR(16));
1173 tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1174 break;
1175 case 2:
1176 if (data_reg != 1)
1177 tcg_out_dat_reg(s, cond, ARITH_MOV,
1178 1, 0, data_reg, SHIFT_IMM_LSL(0));
1179 tcg_out_dat_imm(s, cond, ARITH_MOV, 2, 0, mem_index);
1180 break;
1181 case 3:
1182 if (data_reg != 1)
1183 tcg_out_dat_reg(s, cond, ARITH_MOV,
1184 1, 0, data_reg, SHIFT_IMM_LSL(0));
1185 if (data_reg2 != 2)
1186 tcg_out_dat_reg(s, cond, ARITH_MOV,
1187 2, 0, data_reg2, SHIFT_IMM_LSL(0));
1188 tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1189 break;
1190 }
1191# else
1192 if (addr_reg2 != 1)
1193 tcg_out_dat_reg(s, cond, ARITH_MOV,
1194 1, 0, addr_reg2, SHIFT_IMM_LSL(0));
1195 switch (opc) {
1196 case 0:
1197 tcg_out_dat_imm(s, cond, ARITH_AND, 2, data_reg, 0xff);
1198 tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1199 break;
1200 case 1:
1201 tcg_out_dat_reg(s, cond, ARITH_MOV,
1202 2, 0, data_reg, SHIFT_IMM_LSL(16));
1203 tcg_out_dat_reg(s, cond, ARITH_MOV,
1204 2, 0, 2, SHIFT_IMM_LSR(16));
1205 tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1206 break;
1207 case 2:
1208 if (data_reg != 2)
1209 tcg_out_dat_reg(s, cond, ARITH_MOV,
1210 2, 0, data_reg, SHIFT_IMM_LSL(0));
1211 tcg_out_dat_imm(s, cond, ARITH_MOV, 3, 0, mem_index);
1212 break;
1213 case 3:
1214 tcg_out_dat_imm(s, cond, ARITH_MOV, 8, 0, mem_index);
1215 tcg_out32(s, (cond << 28) | 0x052d8010); /* str r8, [sp, #-0x10]! */
1216 if (data_reg != 2)
1217 tcg_out_dat_reg(s, cond, ARITH_MOV,
1218 2, 0, data_reg, SHIFT_IMM_LSL(0));
1219 if (data_reg2 != 3)
1220 tcg_out_dat_reg(s, cond, ARITH_MOV,
1221 3, 0, data_reg2, SHIFT_IMM_LSL(0));
1222 break;
1223 }
1224# endif
1225
1226# ifdef SAVE_LR
1227 tcg_out_dat_reg(s, cond, ARITH_MOV, 8, 0, 14, SHIFT_IMM_LSL(0));
1228# endif
1229
1230 tcg_out_bl(s, cond, (tcg_target_long) qemu_st_helpers[s_bits] -
1231 (tcg_target_long) s->code_ptr);
1232# if TARGET_LONG_BITS == 64
1233 if (opc == 3)
1234 tcg_out_dat_imm(s, cond, ARITH_ADD, 13, 13, 0x10);
1235# endif
1236
1237# ifdef SAVE_LR
1238 tcg_out_dat_reg(s, cond, ARITH_MOV, 14, 0, 8, SHIFT_IMM_LSL(0));
1239# endif
1240
1241 *label_ptr += ((void *) s->code_ptr - (void *) label_ptr - 8) >> 2;
1242#else /* !CONFIG_SOFTMMU */
1243 if (GUEST_BASE) {
1244 uint32_t offset = GUEST_BASE;
1245 int i;
1246 int rot;
1247
1248 while (offset) {
1249 i = ctz32(offset) & ~1;
1250 rot = ((32 - i) << 7) & 0xf00;
1251
1252 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, 8, addr_reg,
1253 ((offset >> i) & 0xff) | rot);
1254 addr_reg = 8;
1255 offset &= ~(0xff << i);
1256 }
1257 }
1258 switch (opc) {
1259 case 0:
1260 tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1261 break;
1262 case 0 | 4:
1263 tcg_out_st8s_8(s, COND_AL, data_reg, addr_reg, 0);
1264 break;
1265 case 1:
1266 tcg_out_st16u_8(s, COND_AL, data_reg, addr_reg, 0);
1267 break;
1268 case 1 | 4:
1269 tcg_out_st16s_8(s, COND_AL, data_reg, addr_reg, 0);
1270 break;
1271 case 2:
1272 default:
1273 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1274 break;
1275 case 3:
1276 /* TODO: use block store -
1277 * check that data_reg2 > data_reg or the other way */
1278 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1279 tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1280 break;
1281 }
1282#endif
1283}
1284
1285static uint8_t *tb_ret_addr;
1286
1287static inline void tcg_out_op(TCGContext *s, int opc,
1288 const TCGArg *args, const int *const_args)
1289{
1290 int c;
1291
1292 switch (opc) {
1293 case INDEX_op_exit_tb:
1294#ifdef SAVE_LR
1295 if (args[0] >> 8)
1296 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1297 else
1298 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, TCG_REG_R0, 0, args[0]);
1299 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 15, 0, 14, SHIFT_IMM_LSL(0));
1300 if (args[0] >> 8)
1301 tcg_out32(s, args[0]);
1302#else
1303 {
1304 uint8_t *ld_ptr = s->code_ptr;
1305 if (args[0] >> 8)
1306 tcg_out_ld32_12(s, COND_AL, 0, 15, 0);
1307 else
1308 tcg_out_dat_imm(s, COND_AL, ARITH_MOV, 0, 0, args[0]);
1309 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1310 if (args[0] >> 8) {
1311 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1312 tcg_out32(s, args[0]);
1313 }
1314 }
1315#endif
1316 break;
1317 case INDEX_op_goto_tb:
1318 if (s->tb_jmp_offset) {
1319 /* Direct jump method */
1320#if defined(USE_DIRECT_JUMP)
1321 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1322 tcg_out_b(s, COND_AL, 8);
1323#else
1324 tcg_out_ld32_12(s, COND_AL, 15, 15, -4);
1325 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1326 tcg_out32(s, 0);
1327#endif
1328 } else {
1329 /* Indirect jump method */
1330#if 1
1331 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1332 if (c > 0xfff || c < -0xfff) {
1333 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1334 (tcg_target_long) (s->tb_next + args[0]));
1335 tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1336 } else
1337 tcg_out_ld32_12(s, COND_AL, 15, 15, c);
1338#else
1339 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, 15, 0);
1340 tcg_out_ld32_12(s, COND_AL, 15, TCG_REG_R0, 0);
1341 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1342#endif
1343 }
1344 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1345 break;
1346 case INDEX_op_call:
1347 if (const_args[0])
1348 tcg_out_call(s, COND_AL, args[0]);
1349 else
1350 tcg_out_callr(s, COND_AL, args[0]);
1351 break;
1352 case INDEX_op_jmp:
1353 if (const_args[0])
1354 tcg_out_goto(s, COND_AL, args[0]);
1355 else
1356 tcg_out_bx(s, COND_AL, args[0]);
1357 break;
1358 case INDEX_op_br:
1359 tcg_out_goto_label(s, COND_AL, args[0]);
1360 break;
1361
1362 case INDEX_op_ld8u_i32:
1363 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1364 break;
1365 case INDEX_op_ld8s_i32:
1366 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1367 break;
1368 case INDEX_op_ld16u_i32:
1369 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1370 break;
1371 case INDEX_op_ld16s_i32:
1372 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1373 break;
1374 case INDEX_op_ld_i32:
1375 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1376 break;
1377 case INDEX_op_st8_i32:
1378 tcg_out_st8u(s, COND_AL, args[0], args[1], args[2]);
1379 break;
1380 case INDEX_op_st16_i32:
1381 tcg_out_st16u(s, COND_AL, args[0], args[1], args[2]);
1382 break;
1383 case INDEX_op_st_i32:
1384 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1385 break;
1386
1387 case INDEX_op_mov_i32:
1388 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1389 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1390 break;
1391 case INDEX_op_movi_i32:
1392 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1393 break;
1394 case INDEX_op_add_i32:
1395 c = ARITH_ADD;
1396 goto gen_arith;
1397 case INDEX_op_sub_i32:
1398 c = ARITH_SUB;
1399 goto gen_arith;
1400 case INDEX_op_and_i32:
1401 c = ARITH_AND;
1402 goto gen_arith;
1403 case INDEX_op_or_i32:
1404 c = ARITH_ORR;
1405 goto gen_arith;
1406 case INDEX_op_xor_i32:
1407 c = ARITH_EOR;
1408 /* Fall through. */
1409 gen_arith:
1410 if (const_args[2])
1411 tcg_out_dat_imm(s, COND_AL, c,
1412 args[0], args[1], args[2]);
1413 else
1414 tcg_out_dat_reg(s, COND_AL, c,
1415 args[0], args[1], args[2], SHIFT_IMM_LSL(0));
1416 break;
1417 case INDEX_op_add2_i32:
1418 tcg_out_dat_reg2(s, COND_AL, ARITH_ADD, ARITH_ADC,
1419 args[0], args[1], args[2], args[3],
1420 args[4], args[5], SHIFT_IMM_LSL(0));
1421 break;
1422 case INDEX_op_sub2_i32:
1423 tcg_out_dat_reg2(s, COND_AL, ARITH_SUB, ARITH_SBC,
1424 args[0], args[1], args[2], args[3],
1425 args[4], args[5], SHIFT_IMM_LSL(0));
1426 break;
1427 case INDEX_op_neg_i32:
1428 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1429 break;
1430 case INDEX_op_mul_i32:
1431 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1432 break;
1433 case INDEX_op_mulu2_i32:
1434 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1435 break;
1436 case INDEX_op_div2_i32:
1437 tcg_out_div_helper(s, COND_AL, args,
1438 tcg_helper_div_i64, tcg_helper_rem_i64,
1439 SHIFT_IMM_ASR(31));
1440 break;
1441 case INDEX_op_divu2_i32:
1442 tcg_out_div_helper(s, COND_AL, args,
1443 tcg_helper_divu_i64, tcg_helper_remu_i64,
1444 SHIFT_IMM_LSR(31));
1445 break;
1446 /* XXX: Perhaps args[2] & 0x1f is wrong */
1447 case INDEX_op_shl_i32:
1448 c = const_args[2] ?
1449 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1450 goto gen_shift32;
1451 case INDEX_op_shr_i32:
1452 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1453 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1454 goto gen_shift32;
1455 case INDEX_op_sar_i32:
1456 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1457 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1458 /* Fall through. */
1459 gen_shift32:
1460 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1461 break;
1462
1463 case INDEX_op_brcond_i32:
1464 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1465 args[0], args[1], SHIFT_IMM_LSL(0));
1466 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1467 break;
1468 case INDEX_op_brcond2_i32:
1469 /* The resulting conditions are:
1470 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1471 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1472 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1473 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1474 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1475 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1476 */
1477 tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0,
1478 args[1], args[3], SHIFT_IMM_LSL(0));
1479 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1480 args[0], args[2], SHIFT_IMM_LSL(0));
1481 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1482 break;
1483
1484 case INDEX_op_qemu_ld8u:
1485 tcg_out_qemu_ld(s, COND_AL, args, 0);
1486 break;
1487 case INDEX_op_qemu_ld8s:
1488 tcg_out_qemu_ld(s, COND_AL, args, 0 | 4);
1489 break;
1490 case INDEX_op_qemu_ld16u:
1491 tcg_out_qemu_ld(s, COND_AL, args, 1);
1492 break;
1493 case INDEX_op_qemu_ld16s:
1494 tcg_out_qemu_ld(s, COND_AL, args, 1 | 4);
1495 break;
1496 case INDEX_op_qemu_ld32u:
1497 tcg_out_qemu_ld(s, COND_AL, args, 2);
1498 break;
1499 case INDEX_op_qemu_ld64:
1500 tcg_out_qemu_ld(s, COND_AL, args, 3);
1501 break;
1502
1503 case INDEX_op_qemu_st8:
1504 tcg_out_qemu_st(s, COND_AL, args, 0);
1505 break;
1506 case INDEX_op_qemu_st16:
1507 tcg_out_qemu_st(s, COND_AL, args, 1);
1508 break;
1509 case INDEX_op_qemu_st32:
1510 tcg_out_qemu_st(s, COND_AL, args, 2);
1511 break;
1512 case INDEX_op_qemu_st64:
1513 tcg_out_qemu_st(s, COND_AL, args, 3);
1514 break;
1515
1516 case INDEX_op_ext8s_i32:
1517 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1518 args[0], 0, args[1], SHIFT_IMM_LSL(24));
1519 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1520 args[0], 0, args[0], SHIFT_IMM_ASR(24));
1521 break;
1522 case INDEX_op_ext16s_i32:
1523 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1524 args[0], 0, args[1], SHIFT_IMM_LSL(16));
1525 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1526 args[0], 0, args[0], SHIFT_IMM_ASR(16));
1527 break;
1528
1529 default:
1530 tcg_abort();
1531 }
1532}
1533
1534static const TCGTargetOpDef arm_op_defs[] = {
1535 { INDEX_op_exit_tb, { } },
1536 { INDEX_op_goto_tb, { } },
1537 { INDEX_op_call, { "ri" } },
1538 { INDEX_op_jmp, { "ri" } },
1539 { INDEX_op_br, { } },
1540
1541 { INDEX_op_mov_i32, { "r", "r" } },
1542 { INDEX_op_movi_i32, { "r" } },
1543
1544 { INDEX_op_ld8u_i32, { "r", "r" } },
1545 { INDEX_op_ld8s_i32, { "r", "r" } },
1546 { INDEX_op_ld16u_i32, { "r", "r" } },
1547 { INDEX_op_ld16s_i32, { "r", "r" } },
1548 { INDEX_op_ld_i32, { "r", "r" } },
1549 { INDEX_op_st8_i32, { "r", "r" } },
1550 { INDEX_op_st16_i32, { "r", "r" } },
1551 { INDEX_op_st_i32, { "r", "r" } },
1552
1553 /* TODO: "r", "r", "ri" */
1554 { INDEX_op_add_i32, { "r", "r", "rI" } },
1555 { INDEX_op_sub_i32, { "r", "r", "rI" } },
1556 { INDEX_op_mul_i32, { "r", "r", "r" } },
1557 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1558 { INDEX_op_div2_i32, { "r", "r", "r", "1", "2" } },
1559 { INDEX_op_divu2_i32, { "r", "r", "r", "1", "2" } },
1560 { INDEX_op_and_i32, { "r", "r", "rI" } },
1561 { INDEX_op_or_i32, { "r", "r", "rI" } },
1562 { INDEX_op_xor_i32, { "r", "r", "rI" } },
1563 { INDEX_op_neg_i32, { "r", "r" } },
1564
1565 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1566 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1567 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1568
1569 { INDEX_op_brcond_i32, { "r", "r" } },
1570
1571 /* TODO: "r", "r", "r", "r", "ri", "ri" */
1572 { INDEX_op_add2_i32, { "r", "r", "r", "r", "r", "r" } },
1573 { INDEX_op_sub2_i32, { "r", "r", "r", "r", "r", "r" } },
1574 { INDEX_op_brcond2_i32, { "r", "r", "r", "r" } },
1575
1576 { INDEX_op_qemu_ld8u, { "r", "x", "X" } },
1577 { INDEX_op_qemu_ld8s, { "r", "x", "X" } },
1578 { INDEX_op_qemu_ld16u, { "r", "x", "X" } },
1579 { INDEX_op_qemu_ld16s, { "r", "x", "X" } },
1580 { INDEX_op_qemu_ld32u, { "r", "x", "X" } },
1581 { INDEX_op_qemu_ld64, { "d", "r", "x", "X" } },
1582
1583 { INDEX_op_qemu_st8, { "x", "x", "X" } },
1584 { INDEX_op_qemu_st16, { "x", "x", "X" } },
1585 { INDEX_op_qemu_st32, { "x", "x", "X" } },
1586 { INDEX_op_qemu_st64, { "x", "D", "x", "X" } },
1587
1588 { INDEX_op_ext8s_i32, { "r", "r" } },
1589 { INDEX_op_ext16s_i32, { "r", "r" } },
1590
1591 { -1 },
1592};
1593
1594void tcg_target_init(TCGContext *s)
1595{
1596 /* fail safe */
1597 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry))
1598 tcg_abort();
1599
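/* r0-r14 are usable except r8, which is kept free as the scratch
 * register used throughout this backend; r0-r3, r12 (ip) and r14 (lr)
 * are call-clobbered under the ARM calling convention. */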
1600 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
1601 ((2 << TCG_REG_R14) - 1) & ~(1 << TCG_REG_R8));
1602 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1603 ((2 << TCG_REG_R3) - 1) |
1604 (1 << TCG_REG_R12) | (1 << TCG_REG_R14));
1605
1606 tcg_regset_clear(s->reserved_regs);
1607#ifdef SAVE_LR
1608 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R14);
1609#endif
1610 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
1611 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R8);
1612
1613 tcg_add_target_add_op_defs(arm_op_defs);
1614}
1615
1616static inline void tcg_out_ld(TCGContext *s, TCGType type, int arg,
1617 int arg1, tcg_target_long arg2)
1618{
1619 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
1620}
1621
1622static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
1623 int arg1, tcg_target_long arg2)
1624{
1625 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
1626}
1627
1628static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
1629{
1630 if (val > 0)
1631 if (val < 0x100)
1632 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, reg, reg, val);
1633 else
1634 tcg_abort();
1635 else if (val < 0) {
1636 if (val > -0x100)
1637 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, reg, reg, -val);
1638 else
1639 tcg_abort();
1640 }
1641}
1642
1643static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
1644{
1645 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
1646}
1647
1648static inline void tcg_out_movi(TCGContext *s, TCGType type,
1649 int ret, tcg_target_long arg)
1650{
1651 tcg_out_movi32(s, COND_AL, ret, arg);
1652}
1653
1654void tcg_target_qemu_prologue(TCGContext *s)
1655{
1656 /* stmdb sp!, { r9 - r11, lr } */
1657 tcg_out32(s, (COND_AL << 28) | 0x092d4e00);
1658
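/* The address of the translated block is passed in r0; exit_tb branches
 * back to tb_ret_addr, where the epilogue below restores r9-r11 and
 * returns by popping pc. */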
1659 tcg_out_bx(s, COND_AL, TCG_REG_R0);
1660 tb_ret_addr = s->code_ptr;
1661
1662 /* ldmia sp!, { r9 - r11, pc } */
1663 tcg_out32(s, (COND_AL << 28) | 0x08bd8e00);
1664}