tcg/arm/tcg-target.c
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Andrzej Zaborowski
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 /* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. */
26 #ifndef __ARM_ARCH
27 # if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
28 || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
29 || defined(__ARM_ARCH_7EM__)
30 # define __ARM_ARCH 7
31 # elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
32 || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
33 || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
34 # define __ARM_ARCH 6
35 # elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
36 || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
37 || defined(__ARM_ARCH_5TEJ__)
38 # define __ARM_ARCH 5
39 # else
40 # define __ARM_ARCH 4
41 # endif
42 #endif
43
44 static int arm_arch = __ARM_ARCH;
45
46 #if defined(__ARM_ARCH_5T__) \
47 || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
48 # define use_armv5t_instructions 1
49 #else
50 # define use_armv5t_instructions use_armv6_instructions
51 #endif
52
53 #define use_armv6_instructions (__ARM_ARCH >= 6 || arm_arch >= 6)
54 #define use_armv7_instructions (__ARM_ARCH >= 7 || arm_arch >= 7)
55
56 #ifndef use_idiv_instructions
57 bool use_idiv_instructions;
58 #endif
59 #ifdef CONFIG_GETAUXVAL
60 # include <sys/auxv.h>
61 #endif
62
63 #ifndef NDEBUG
64 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
65 "%r0",
66 "%r1",
67 "%r2",
68 "%r3",
69 "%r4",
70 "%r5",
71 "%r6",
72 "%r7",
73 "%r8",
74 "%r9",
75 "%r10",
76 "%r11",
77 "%r12",
78 "%r13",
79 "%r14",
80 "%pc",
81 };
82 #endif
83
84 static const int tcg_target_reg_alloc_order[] = {
85 TCG_REG_R4,
86 TCG_REG_R5,
87 TCG_REG_R6,
88 TCG_REG_R7,
89 TCG_REG_R8,
90 TCG_REG_R9,
91 TCG_REG_R10,
92 TCG_REG_R11,
93 TCG_REG_R13,
94 TCG_REG_R0,
95 TCG_REG_R1,
96 TCG_REG_R2,
97 TCG_REG_R3,
98 TCG_REG_R12,
99 TCG_REG_R14,
100 };
101
102 static const int tcg_target_call_iarg_regs[4] = {
103 TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
104 };
105 static const int tcg_target_call_oarg_regs[2] = {
106 TCG_REG_R0, TCG_REG_R1
107 };
108
109 #define TCG_REG_TMP TCG_REG_R12
110
111 static inline void reloc_abs32(void *code_ptr, intptr_t target)
112 {
113 *(uint32_t *) code_ptr = target;
114 }
115
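/* Note: reading the ARM PC yields the address of the current instruction
   plus 8, so the 24-bit word offset in a B/BL instruction is encoded
   relative to code_ptr + 8.  The same bias accounts for the "- 8"
   adjustments in tcg_out_b(), tcg_out_bl() and tcg_out_goto() below. */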
116 static inline void reloc_pc24(void *code_ptr, intptr_t target)
117 {
118 uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);
119
120 *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
121 | (offset & 0xffffff);
122 }
123
124 static void patch_reloc(uint8_t *code_ptr, int type,
125 intptr_t value, intptr_t addend)
126 {
127 switch (type) {
128 case R_ARM_ABS32:
129 reloc_abs32(code_ptr, value);
130 break;
131
132 case R_ARM_CALL:
133 case R_ARM_JUMP24:
134 default:
135 tcg_abort();
136
137 case R_ARM_PC24:
138 reloc_pc24(code_ptr, value);
139 break;
140 }
141 }
142
143 #define TCG_CT_CONST_ARM 0x100
144 #define TCG_CT_CONST_INV 0x200
145 #define TCG_CT_CONST_NEG 0x400
146 #define TCG_CT_CONST_ZERO 0x800
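/* These flags back the target constraint letters parsed below:
   'I' (TCG_CT_CONST_ARM)  - constant encodable as a rotated 8-bit immediate,
   'K' (TCG_CT_CONST_INV)  - encodable once inverted (used via e.g. BIC/MVN),
   'N' (TCG_CT_CONST_NEG)  - encodable once negated (e.g. ADD <-> SUB),
   'Z' (TCG_CT_CONST_ZERO) - the constant zero.
   tcg_target_const_match() below tests a constant against these. */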
147
148 /* parse target specific constraints */
149 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
150 {
151 const char *ct_str;
152
153 ct_str = *pct_str;
154 switch (ct_str[0]) {
155 case 'I':
156 ct->ct |= TCG_CT_CONST_ARM;
157 break;
158 case 'K':
159 ct->ct |= TCG_CT_CONST_INV;
160 break;
161 case 'N': /* The gcc constraint letter is L, already used here. */
162 ct->ct |= TCG_CT_CONST_NEG;
163 break;
164 case 'Z':
165 ct->ct |= TCG_CT_CONST_ZERO;
166 break;
167
168 case 'r':
169 ct->ct |= TCG_CT_REG;
170 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
171 break;
172
173 /* qemu_ld address */
174 case 'l':
175 ct->ct |= TCG_CT_REG;
176 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
177 #ifdef CONFIG_SOFTMMU
178 /* r0-r2 will be overwritten when reading the tlb entry,
179 so don't use these. */
180 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
181 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
182 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
183 #endif
184 break;
185 case 'L':
186 ct->ct |= TCG_CT_REG;
187 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
188 #ifdef CONFIG_SOFTMMU
189 /* r1 is still needed to load data_reg or data_reg2,
190 so don't use it. */
191 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
192 #endif
193 break;
194
195 /* qemu_st address & data_reg */
196 case 's':
197 ct->ct |= TCG_CT_REG;
198 tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
199 /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
200 and r0-r1 are used for byte swapping, so don't use these. */
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
202 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
203 #if defined(CONFIG_SOFTMMU)
204 /* Avoid clashes with registers being used for helper args */
205 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
206 #if TARGET_LONG_BITS == 64
207 /* Avoid clashes with registers being used for helper args */
208 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
209 #endif
210 #endif
211 break;
212
213 default:
214 return -1;
215 }
216 ct_str++;
217 *pct_str = ct_str;
218
219 return 0;
220 }
221
222 static inline uint32_t rotl(uint32_t val, int n)
223 {
224 return (val << n) | (val >> (32 - n));
225 }
226
227 /* ARM immediates for ALU instructions are an unsigned 8-bit value
228 right-rotated by an even amount between 0 and 30; see examples below. */
229 static inline int encode_imm(uint32_t imm)
230 {
231 int shift;
232
233 /* simple case, only lower bits */
234 if ((imm & ~0xff) == 0)
235 return 0;
236 /* then try a simple even shift */
237 shift = ctz32(imm) & ~1;
238 if (((imm >> shift) & ~0xff) == 0)
239 return 32 - shift;
240 /* now try harder with rotations */
241 if ((rotl(imm, 2) & ~0xff) == 0)
242 return 2;
243 if ((rotl(imm, 4) & ~0xff) == 0)
244 return 4;
245 if ((rotl(imm, 6) & ~0xff) == 0)
246 return 6;
247 /* imm can't be encoded */
248 return -1;
249 }
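/* Worked examples: encode_imm(0x000000ab) == 0 (no rotation needed);
   encode_imm(0x00ff0000) == 16, since 0x00ff0000 is 0xff rotated right by
   16; encode_imm(0xf000000f) == 4, the byte wrapping around the word
   (0xff rotated right by 4); encode_imm(0x00012345) == -1, as its set
   bits do not fit in one even-aligned 8-bit window.  The return value is
   the right-rotation amount; callers recover the 8-bit base value with
   rotl(imm, rot) and place rot/2 in the rotate field via (rot << 7). */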
250
251 static inline int check_fit_imm(uint32_t imm)
252 {
253 return encode_imm(imm) >= 0;
254 }
255
256 /* Test if a constant matches the constraint.
257 * TODO: define constraints for:
258 *
259 * ldr/str offset: between -0xfff and 0xfff
260 * ldrh/strh offset: between -0xff and 0xff
261 * mov operand2: values represented with x << (2 * y), x < 0x100
262 * add, sub, eor...: ditto
263 */
264 static inline int tcg_target_const_match(tcg_target_long val,
265 const TCGArgConstraint *arg_ct)
266 {
267 int ct;
268 ct = arg_ct->ct;
269 if (ct & TCG_CT_CONST) {
270 return 1;
271 } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
272 return 1;
273 } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
274 return 1;
275 } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
276 return 1;
277 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
278 return 1;
279 } else {
280 return 0;
281 }
282 }
283
284 #define TO_CPSR (1 << 20)
285
286 typedef enum {
287 ARITH_AND = 0x0 << 21,
288 ARITH_EOR = 0x1 << 21,
289 ARITH_SUB = 0x2 << 21,
290 ARITH_RSB = 0x3 << 21,
291 ARITH_ADD = 0x4 << 21,
292 ARITH_ADC = 0x5 << 21,
293 ARITH_SBC = 0x6 << 21,
294 ARITH_RSC = 0x7 << 21,
295 ARITH_TST = 0x8 << 21 | TO_CPSR,
296 ARITH_CMP = 0xa << 21 | TO_CPSR,
297 ARITH_CMN = 0xb << 21 | TO_CPSR,
298 ARITH_ORR = 0xc << 21,
299 ARITH_MOV = 0xd << 21,
300 ARITH_BIC = 0xe << 21,
301 ARITH_MVN = 0xf << 21,
302
303 INSN_LDR_IMM = 0x04100000,
304 INSN_LDR_REG = 0x06100000,
305 INSN_STR_IMM = 0x04000000,
306 INSN_STR_REG = 0x06000000,
307
308 INSN_LDRH_IMM = 0x005000b0,
309 INSN_LDRH_REG = 0x001000b0,
310 INSN_LDRSH_IMM = 0x005000f0,
311 INSN_LDRSH_REG = 0x001000f0,
312 INSN_STRH_IMM = 0x004000b0,
313 INSN_STRH_REG = 0x000000b0,
314
315 INSN_LDRB_IMM = 0x04500000,
316 INSN_LDRB_REG = 0x06500000,
317 INSN_LDRSB_IMM = 0x005000d0,
318 INSN_LDRSB_REG = 0x001000d0,
319 INSN_STRB_IMM = 0x04400000,
320 INSN_STRB_REG = 0x06400000,
321
322 INSN_LDRD_IMM = 0x004000d0,
323 } ARMInsn;
324
325 #define SHIFT_IMM_LSL(im) (((im) << 7) | 0x00)
326 #define SHIFT_IMM_LSR(im) (((im) << 7) | 0x20)
327 #define SHIFT_IMM_ASR(im) (((im) << 7) | 0x40)
328 #define SHIFT_IMM_ROR(im) (((im) << 7) | 0x60)
329 #define SHIFT_REG_LSL(rs) (((rs) << 8) | 0x10)
330 #define SHIFT_REG_LSR(rs) (((rs) << 8) | 0x30)
331 #define SHIFT_REG_ASR(rs) (((rs) << 8) | 0x50)
332 #define SHIFT_REG_ROR(rs) (((rs) << 8) | 0x70)
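/* Shifter-operand (operand2) encodings for data-processing instructions;
   the rm register number in bits 3:0 is OR'ed in separately.  For example,
   SHIFT_IMM_LSR(16) | rm encodes "rm, lsr #16", and SHIFT_REG_ROR(rs) | rm
   encodes "rm, ror rs". */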
333
334 enum arm_cond_code_e {
335 COND_EQ = 0x0,
336 COND_NE = 0x1,
337 COND_CS = 0x2, /* Unsigned greater or equal */
338 COND_CC = 0x3, /* Unsigned less than */
339 COND_MI = 0x4, /* Negative */
340 COND_PL = 0x5, /* Zero or greater */
341 COND_VS = 0x6, /* Overflow */
342 COND_VC = 0x7, /* No overflow */
343 COND_HI = 0x8, /* Unsigned greater than */
344 COND_LS = 0x9, /* Unsigned less or equal */
345 COND_GE = 0xa,
346 COND_LT = 0xb,
347 COND_GT = 0xc,
348 COND_LE = 0xd,
349 COND_AL = 0xe,
350 };
351
352 static const uint8_t tcg_cond_to_arm_cond[] = {
353 [TCG_COND_EQ] = COND_EQ,
354 [TCG_COND_NE] = COND_NE,
355 [TCG_COND_LT] = COND_LT,
356 [TCG_COND_GE] = COND_GE,
357 [TCG_COND_LE] = COND_LE,
358 [TCG_COND_GT] = COND_GT,
359 /* unsigned */
360 [TCG_COND_LTU] = COND_CC,
361 [TCG_COND_GEU] = COND_CS,
362 [TCG_COND_LEU] = COND_LS,
363 [TCG_COND_GTU] = COND_HI,
364 };
365
366 static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
367 {
368 tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
369 }
370
371 static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
372 {
373 tcg_out32(s, (cond << 28) | 0x0a000000 |
374 (((offset - 8) >> 2) & 0x00ffffff));
375 }
376
377 static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
378 {
379 /* We pay attention here to not modify the branch target by skipping
380 the corresponding bytes. This ensures that caches and memory are
381 kept coherent during retranslation. */
382 #ifdef HOST_WORDS_BIGENDIAN
383 tcg_out8(s, (cond << 4) | 0x0a);
384 s->code_ptr += 3;
385 #else
386 s->code_ptr += 3;
387 tcg_out8(s, (cond << 4) | 0x0a);
388 #endif
389 }
390
391 static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
392 {
393 tcg_out32(s, (cond << 28) | 0x0b000000 |
394 (((offset - 8) >> 2) & 0x00ffffff));
395 }
396
397 static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
398 {
399 tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
400 }
401
402 static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
403 {
404 tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
405 (((offset - 8) >> 2) & 0x00ffffff));
406 }
407
408 static inline void tcg_out_dat_reg(TCGContext *s,
409 int cond, int opc, int rd, int rn, int rm, int shift)
410 {
411 tcg_out32(s, (cond << 28) | (0 << 25) | opc |
412 (rn << 16) | (rd << 12) | shift | rm);
413 }
414
415 static inline void tcg_out_nop(TCGContext *s)
416 {
417 if (use_armv7_instructions) {
418 /* Architected nop introduced in v6k. */
419 /* ??? This is an MSR (imm) 0,0,0 insn. Anyone know if this
420 also Just So Happened to do nothing on pre-v6k so that we
421 don't need to conditionalize it? */
422 tcg_out32(s, 0xe320f000);
423 } else {
424 /* Prior to that the assembler uses mov r0, r0. */
425 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
426 }
427 }
428
429 static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
430 {
431 /* Simple reg-reg move, optimising out the 'do nothing' case */
432 if (rd != rm) {
433 tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
434 }
435 }
436
437 static inline void tcg_out_dat_imm(TCGContext *s,
438 int cond, int opc, int rd, int rn, int im)
439 {
440 tcg_out32(s, (cond << 28) | (1 << 25) | opc |
441 (rn << 16) | (rd << 12) | im);
442 }
443
444 static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
445 {
446 int rot, opc, rn;
447
448 /* For armv7, make sure not to use movw+movt when mov/mvn would do.
449 Speed things up by only checking when movt would be required.
450 Prior to armv7, have one go at fully rotated immediates before
451 doing the decomposition thing below. */
452 if (!use_armv7_instructions || (arg & 0xffff0000)) {
453 rot = encode_imm(arg);
454 if (rot >= 0) {
455 tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
456 rotl(arg, rot) | (rot << 7));
457 return;
458 }
459 rot = encode_imm(~arg);
460 if (rot >= 0) {
461 tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
462 rotl(~arg, rot) | (rot << 7));
463 return;
464 }
465 }
466
467 /* Use movw + movt. */
468 if (use_armv7_instructions) {
469 /* movw */
470 tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
471 | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
472 if (arg & 0xffff0000) {
473 /* movt */
474 tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
475 | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
476 }
477 return;
478 }
479
480 /* TODO: This is very suboptimal; we could easily have a constant
481 pool somewhere after all the instructions. */
482 opc = ARITH_MOV;
483 rn = 0;
484 /* If we have lots of leading 1's, we can shorten the sequence by
485 beginning with mvn and then clearing higher bits with eor. */
486 if (clz32(~arg) > clz32(arg)) {
487 opc = ARITH_MVN, arg = ~arg;
488 }
489 do {
490 int i = ctz32(arg) & ~1;
491 rot = ((32 - i) << 7) & 0xf00;
492 tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
493 arg &= ~(0xff << i);
494
495 opc = ARITH_EOR;
496 rn = rd;
497 } while (arg);
498 }
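/* Illustrative decompositions for the pre-v7 fallback above:
 *   0x0000ffff -> mov rd, #0xff ; eor rd, rd, #0xff00
 *   0xffff00ff -> mvn rd, #0xff00   (leading ones take the mvn path)
 * Each step clears one even-aligned 8-bit chunk, so an arbitrary 32-bit
 * constant needs at most four instructions. */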
499
500 static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
501 TCGArg lhs, TCGArg rhs, int rhs_is_const)
502 {
503 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
504 * rhs must satisfy the "rI" constraint.
505 */
506 if (rhs_is_const) {
507 int rot = encode_imm(rhs);
508 assert(rot >= 0);
509 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
510 } else {
511 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
512 }
513 }
514
515 static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
516 TCGReg dst, TCGReg lhs, TCGArg rhs,
517 bool rhs_is_const)
518 {
519 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
520 * rhs must satisfy the "rIK" constraint.
521 */
522 if (rhs_is_const) {
523 int rot = encode_imm(rhs);
524 if (rot < 0) {
525 rhs = ~rhs;
526 rot = encode_imm(rhs);
527 assert(rot >= 0);
528 opc = opinv;
529 }
530 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
531 } else {
532 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
533 }
534 }
535
536 static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
537 TCGArg dst, TCGArg lhs, TCGArg rhs,
538 bool rhs_is_const)
539 {
540 /* Emit either the reg,imm or reg,reg form of a data-processing insn.
541 * rhs must satisfy the "rIN" constraint.
542 */
543 if (rhs_is_const) {
544 int rot = encode_imm(rhs);
545 if (rot < 0) {
546 rhs = -rhs;
547 rot = encode_imm(rhs);
548 assert(rot >= 0);
549 opc = opneg;
550 }
551 tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
552 } else {
553 tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
554 }
555 }
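/* Examples of the rIK/rIN fallbacks above: 0xffffff00 is not encodable,
   so an "and" with it is emitted as "bic rd, rn, #0xff" (inverted
   constant, inverted opcode); similarly the unencodable constant -4 in
   an "add" is emitted as "sub rd, rn, #4" (negated constant, negated
   opcode). */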
556
557 static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
558 TCGReg rn, TCGReg rm)
559 {
560 /* if ArchVersion() < 6 && d == n then UNPREDICTABLE; */
561 if (!use_armv6_instructions && rd == rn) {
562 if (rd == rm) {
563 /* rd == rn == rm; copy an input to tmp first. */
564 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
565 rm = rn = TCG_REG_TMP;
566 } else {
567 rn = rm;
568 rm = rd;
569 }
570 }
571 /* mul */
572 tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
573 }
574
575 static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
576 TCGReg rd1, TCGReg rn, TCGReg rm)
577 {
578 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
579 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
580 if (rd0 == rm || rd1 == rm) {
581 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
582 rn = TCG_REG_TMP;
583 } else {
584 TCGReg t = rn;
585 rn = rm;
586 rm = t;
587 }
588 }
589 /* umull */
590 tcg_out32(s, (cond << 28) | 0x00800090 |
591 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
592 }
593
594 static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
595 TCGReg rd1, TCGReg rn, TCGReg rm)
596 {
597 /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE; */
598 if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
599 if (rd0 == rm || rd1 == rm) {
600 tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
601 rn = TCG_REG_TMP;
602 } else {
603 TCGReg t = rn;
604 rn = rm;
605 rm = t;
606 }
607 }
608 /* smull */
609 tcg_out32(s, (cond << 28) | 0x00c00090 |
610 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
611 }
612
613 static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
614 {
615 tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
616 }
617
618 static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
619 {
620 tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
621 }
622
623 static inline void tcg_out_ext8s(TCGContext *s, int cond,
624 int rd, int rn)
625 {
626 if (use_armv6_instructions) {
627 /* sxtb */
628 tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
629 } else {
630 tcg_out_dat_reg(s, cond, ARITH_MOV,
631 rd, 0, rn, SHIFT_IMM_LSL(24));
632 tcg_out_dat_reg(s, cond, ARITH_MOV,
633 rd, 0, rd, SHIFT_IMM_ASR(24));
634 }
635 }
636
637 static inline void tcg_out_ext8u(TCGContext *s, int cond,
638 int rd, int rn)
639 {
640 tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
641 }
642
643 static inline void tcg_out_ext16s(TCGContext *s, int cond,
644 int rd, int rn)
645 {
646 if (use_armv6_instructions) {
647 /* sxth */
648 tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
649 } else {
650 tcg_out_dat_reg(s, cond, ARITH_MOV,
651 rd, 0, rn, SHIFT_IMM_LSL(16));
652 tcg_out_dat_reg(s, cond, ARITH_MOV,
653 rd, 0, rd, SHIFT_IMM_ASR(16));
654 }
655 }
656
657 static inline void tcg_out_ext16u(TCGContext *s, int cond,
658 int rd, int rn)
659 {
660 if (use_armv6_instructions) {
661 /* uxth */
662 tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
663 } else {
664 tcg_out_dat_reg(s, cond, ARITH_MOV,
665 rd, 0, rn, SHIFT_IMM_LSL(16));
666 tcg_out_dat_reg(s, cond, ARITH_MOV,
667 rd, 0, rd, SHIFT_IMM_LSR(16));
668 }
669 }
670
671 static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
672 {
673 if (use_armv6_instructions) {
674 /* revsh */
675 tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
676 } else {
677 tcg_out_dat_reg(s, cond, ARITH_MOV,
678 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
679 tcg_out_dat_reg(s, cond, ARITH_MOV,
680 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
681 tcg_out_dat_reg(s, cond, ARITH_ORR,
682 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
683 }
684 }
685
686 static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
687 {
688 if (use_armv6_instructions) {
689 /* rev16 */
690 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
691 } else {
692 tcg_out_dat_reg(s, cond, ARITH_MOV,
693 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
694 tcg_out_dat_reg(s, cond, ARITH_MOV,
695 TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
696 tcg_out_dat_reg(s, cond, ARITH_ORR,
697 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
698 }
699 }
700
701 /* swap the two low bytes assuming that the two high input bytes and the
702 two high output bytes can hold any value. */
703 static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
704 {
705 if (use_armv6_instructions) {
706 /* rev16 */
707 tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
708 } else {
709 tcg_out_dat_reg(s, cond, ARITH_MOV,
710 TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
711 tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
712 tcg_out_dat_reg(s, cond, ARITH_ORR,
713 rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
714 }
715 }
716
717 static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
718 {
719 if (use_armv6_instructions) {
720 /* rev */
721 tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
722 } else {
723 tcg_out_dat_reg(s, cond, ARITH_EOR,
724 TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
725 tcg_out_dat_imm(s, cond, ARITH_BIC,
726 TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
727 tcg_out_dat_reg(s, cond, ARITH_MOV,
728 rd, 0, rn, SHIFT_IMM_ROR(8));
729 tcg_out_dat_reg(s, cond, ARITH_EOR,
730 rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
731 }
732 }
733
734 bool tcg_target_deposit_valid(int ofs, int len)
735 {
736 /* ??? Without bfi, we could improve over generic code by combining
737 the right-shift from a non-zero ofs with the orr. We do run into
738 problems when rd == rs, and the mask generated from ofs+len doesn't
739 fit into an immediate. We would have to be careful not to pessimize
740 wrt the optimizations performed on the expanded code. */
741 return use_armv7_instructions;
742 }
743
744 static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
745 TCGArg a1, int ofs, int len, bool const_a1)
746 {
747 if (const_a1) {
748 /* bfi becomes bfc with rn == 15. */
749 a1 = 15;
750 }
751 /* bfi/bfc */
752 tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
753 | (ofs << 7) | ((ofs + len - 1) << 16));
754 }
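/* For example, depositing an 8-bit field at bit offset 8 emits
   "bfi rd, a1, #8, #8": lsb (= ofs) goes in bits 11:7 and msb
   (= ofs + len - 1) in bits 20:16.  With const_a1 the source field is 15
   and the instruction reads as "bfc rd, #8, #8", which clears the field,
   so only a constant zero source is meaningful here. */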
755
756 /* Note that this routine is used for both LDR and LDRH formats, so we do
757 not wish to include an immediate shift at this point. */
758 static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
759 TCGReg rn, TCGReg rm, bool u, bool p, bool w)
760 {
761 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
762 | (w << 21) | (rn << 16) | (rt << 12) | rm);
763 }
764
765 static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
766 TCGReg rn, int imm8, bool p, bool w)
767 {
768 bool u = 1;
769 if (imm8 < 0) {
770 imm8 = -imm8;
771 u = 0;
772 }
773 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
774 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
775 }
776
777 static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
778 TCGReg rn, int imm12, bool p, bool w)
779 {
780 bool u = 1;
781 if (imm12 < 0) {
782 imm12 = -imm12;
783 u = 0;
784 }
785 tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
786 (rn << 16) | (rt << 12) | imm12);
787 }
788
789 static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
790 TCGReg rn, int imm12)
791 {
792 tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
793 }
794
795 static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
796 TCGReg rn, int imm12)
797 {
798 tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
799 }
800
801 static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
802 TCGReg rn, TCGReg rm)
803 {
804 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
805 }
806
807 static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
808 TCGReg rn, TCGReg rm)
809 {
810 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
811 }
812
813 /* Register pre-increment with base writeback. */
814 static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
815 TCGReg rn, TCGReg rm)
816 {
817 tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
818 }
819
820 static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
821 TCGReg rn, TCGReg rm)
822 {
823 tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
824 }
825
826 static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
827 TCGReg rn, int imm8)
828 {
829 tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
830 }
831
832 static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
833 TCGReg rn, int imm8)
834 {
835 tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
836 }
837
838 static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
839 TCGReg rn, TCGReg rm)
840 {
841 tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
842 }
843
844 static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
845 TCGReg rn, TCGReg rm)
846 {
847 tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
848 }
849
850 static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
851 TCGReg rn, int imm8)
852 {
853 tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
854 }
855
856 static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
857 TCGReg rn, TCGReg rm)
858 {
859 tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
860 }
861
862 static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
863 TCGReg rn, int imm12)
864 {
865 tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
866 }
867
868 static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
869 TCGReg rn, int imm12)
870 {
871 tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
872 }
873
874 static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
875 TCGReg rn, TCGReg rm)
876 {
877 tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
878 }
879
880 static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
881 TCGReg rn, TCGReg rm)
882 {
883 tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
884 }
885
886 static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
887 TCGReg rn, int imm8)
888 {
889 tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
890 }
891
892 static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
893 TCGReg rn, TCGReg rm)
894 {
895 tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
896 }
897
898 static inline void tcg_out_ld32u(TCGContext *s, int cond,
899 int rd, int rn, int32_t offset)
900 {
901 if (offset > 0xfff || offset < -0xfff) {
902 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
903 tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
904 } else
905 tcg_out_ld32_12(s, cond, rd, rn, offset);
906 }
907
908 static inline void tcg_out_st32(TCGContext *s, int cond,
909 int rd, int rn, int32_t offset)
910 {
911 if (offset > 0xfff || offset < -0xfff) {
912 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
913 tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
914 } else
915 tcg_out_st32_12(s, cond, rd, rn, offset);
916 }
917
918 static inline void tcg_out_ld16u(TCGContext *s, int cond,
919 int rd, int rn, int32_t offset)
920 {
921 if (offset > 0xff || offset < -0xff) {
922 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
923 tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
924 } else
925 tcg_out_ld16u_8(s, cond, rd, rn, offset);
926 }
927
928 static inline void tcg_out_ld16s(TCGContext *s, int cond,
929 int rd, int rn, int32_t offset)
930 {
931 if (offset > 0xff || offset < -0xff) {
932 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
933 tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
934 } else
935 tcg_out_ld16s_8(s, cond, rd, rn, offset);
936 }
937
938 static inline void tcg_out_st16(TCGContext *s, int cond,
939 int rd, int rn, int32_t offset)
940 {
941 if (offset > 0xff || offset < -0xff) {
942 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
943 tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
944 } else
945 tcg_out_st16_8(s, cond, rd, rn, offset);
946 }
947
948 static inline void tcg_out_ld8u(TCGContext *s, int cond,
949 int rd, int rn, int32_t offset)
950 {
951 if (offset > 0xfff || offset < -0xfff) {
952 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
953 tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
954 } else
955 tcg_out_ld8_12(s, cond, rd, rn, offset);
956 }
957
958 static inline void tcg_out_ld8s(TCGContext *s, int cond,
959 int rd, int rn, int32_t offset)
960 {
961 if (offset > 0xff || offset < -0xff) {
962 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
963 tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
964 } else
965 tcg_out_ld8s_8(s, cond, rd, rn, offset);
966 }
967
968 static inline void tcg_out_st8(TCGContext *s, int cond,
969 int rd, int rn, int32_t offset)
970 {
971 if (offset > 0xfff || offset < -0xfff) {
972 tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
973 tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
974 } else
975 tcg_out_st8_12(s, cond, rd, rn, offset);
976 }
977
978 /* The _goto case is normally between TBs within the same code buffer,
979 * and with the code buffer limited to 16MB we shouldn't need the long
980 * case.
981 *
982 * .... except for the prologue, which is in its own buffer.
983 */
984 static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
985 {
986 int32_t val;
987
988 if (addr & 1) {
989 /* goto to a Thumb destination isn't supported */
990 tcg_abort();
991 }
992
993 val = addr - (tcg_target_long) s->code_ptr;
994 if (val - 8 < 0x01fffffd && val - 8 > -0x01fffffd)
995 tcg_out_b(s, cond, val);
996 else {
997 if (cond == COND_AL) {
998 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
999 tcg_out32(s, addr);
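/* Since the PC reads as this instruction's address + 8,
   "ldr pc, [pc, #-4]" fetches the literal word emitted just after
   it, giving an unconditional jump to any 32-bit address. */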
1000 } else {
1001 tcg_out_movi32(s, cond, TCG_REG_TMP, val - 8);
1002 tcg_out_dat_reg(s, cond, ARITH_ADD,
1003 TCG_REG_PC, TCG_REG_PC,
1004 TCG_REG_TMP, SHIFT_IMM_LSL(0));
1005 }
1006 }
1007 }
1008
1009 /* The call case is mostly used for helpers - so it's not unreasonable
1010 * for them to be beyond branch range */
1011 static inline void tcg_out_call(TCGContext *s, uint32_t addr)
1012 {
1013 int32_t val;
1014
1015 val = addr - (tcg_target_long) s->code_ptr;
1016 if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
1017 if (addr & 1) {
1018 /* Use BLX if the target is in Thumb mode */
1019 if (!use_armv5t_instructions) {
1020 tcg_abort();
1021 }
1022 tcg_out_blx_imm(s, val);
1023 } else {
1024 tcg_out_bl(s, COND_AL, val);
1025 }
1026 } else if (use_armv7_instructions) {
1027 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
1028 tcg_out_blx(s, COND_AL, TCG_REG_TMP);
1029 } else {
1030 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
1031 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1032 tcg_out32(s, addr);
1033 }
1034 }
1035
1036 static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
1037 {
1038 if (use_armv5t_instructions) {
1039 tcg_out_blx(s, cond, arg);
1040 } else {
1041 tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
1042 TCG_REG_PC, SHIFT_IMM_LSL(0));
1043 tcg_out_bx(s, cond, arg);
1044 }
1045 }
1046
1047 static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
1048 {
1049 TCGLabel *l = &s->labels[label_index];
1050
1051 if (l->has_value) {
1052 tcg_out_goto(s, cond, l->u.value);
1053 } else {
1054 tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
1055 tcg_out_b_noaddr(s, cond);
1056 }
1057 }
1058
1059 #ifdef CONFIG_SOFTMMU
1060
1061 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1062 int mmu_idx) */
1063 static const void * const qemu_ld_helpers[4] = {
1064 helper_ldb_mmu,
1065 helper_ldw_mmu,
1066 helper_ldl_mmu,
1067 helper_ldq_mmu,
1068 };
1069
1070 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1071 uintxx_t val, int mmu_idx) */
1072 static const void * const qemu_st_helpers[4] = {
1073 helper_stb_mmu,
1074 helper_stw_mmu,
1075 helper_stl_mmu,
1076 helper_stq_mmu,
1077 };
1078
1079 /* Helper routines for marshalling helper function arguments into
1080 * the correct registers and stack.
1081 * argreg is where we want to put this argument, arg is the argument itself.
1082 * Return value is the updated argreg ready for the next call.
1084 * Note that argregs 0..3 are real registers; 4 and up go on the stack.
1084 *
1085 * We provide routines for arguments which are: immediate, 32 bit
1086 * value in register, 16 and 8 bit values in register (which must be zero
1087 * extended before use) and 64 bit value in a lo:hi register pair.
1088 */
1089 #define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \
1090 static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \
1091 { \
1092 if (argreg < 4) { \
1093 MOV_ARG(s, COND_AL, argreg, arg); \
1094 } else { \
1095 int ofs = (argreg - 4) * 4; \
1096 EXT_ARG; \
1097 assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \
1098 tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \
1099 } \
1100 return argreg + 1; \
1101 }
1102
1103 DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
1104 (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
1105 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
1106 (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
1107 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
1108 (tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
1109 DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
1110
1111 static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
1112 TCGReg arglo, TCGReg arghi)
1113 {
1114 /* 64 bit arguments must go in even/odd register pairs
1115 * and in 8-aligned stack slots.
1116 */
1117 if (argreg & 1) {
1118 argreg++;
1119 }
1120 argreg = tcg_out_arg_reg32(s, argreg, arglo);
1121 argreg = tcg_out_arg_reg32(s, argreg, arghi);
1122 return argreg;
1123 }
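/* Illustrative layout for a 64-bit store helper call with a 32-bit guest
   address (see tcg_out_qemu_st_slow_path below): r0 = env, r1 = addr,
   r2:r3 = data lo:hi, mem_index -> stack slot 0.  Had the 64-bit value
   fallen on an odd argreg, the bump above would have skipped a register,
   matching the even/odd pairing described in the comment above. */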
1124
1125 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
1126
1127 /* Load and compare a TLB entry, leaving the flags set. Leaves R2 pointing
1128 to the tlb entry. Clobbers R1 and TMP. */
1129
1130 static void tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
1131 int s_bits, int tlb_offset)
1132 {
1133 TCGReg base = TCG_AREG0;
1134
1135 /* Should generate something like the following:
1136 * pre-v7:
1137 * shr tmp, addr_reg, #TARGET_PAGE_BITS (1)
1138 * add r2, env, #off & 0xff00
1139 * and r0, tmp, #(CPU_TLB_SIZE - 1) (2)
1140 * add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3)
1141 * ldr r0, [r2, #off & 0xff]! (4)
1142 * tst addr_reg, #s_mask
1143 * cmpeq r0, tmp, lsl #TARGET_PAGE_BITS (5)
1144 *
1145 * v7 (not implemented yet):
1146 * ubfx r2, addr_reg, #TARGET_PAGE_BITS, #CPU_TLB_BITS (1)
1147 * movw tmp, #~TARGET_PAGE_MASK & ~s_mask
1148 * movw r0, #off
1149 * add r2, env, r2, lsl #CPU_TLB_ENTRY_BITS (2)
1150 * bic tmp, addr_reg, tmp
1151 * ldr r0, [r2, r0]! (3)
1152 * cmp r0, tmp (4)
1153 */
1154 # if CPU_TLB_BITS > 8
1155 # error
1156 # endif
1157 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
1158 0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
1159
1160 /* We assume that the offset is contained within 16 bits. */
1161 assert((tlb_offset & ~0xffff) == 0);
1162 if (tlb_offset > 0xff) {
1163 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
1164 (24 << 7) | (tlb_offset >> 8));
1165 tlb_offset &= 0xff;
1166 base = TCG_REG_R2;
1167 }
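/* For example, with tlb_offset == 0x1280 the add above becomes
   "add r2, env, #0x1200" (0x12 rotated right by 24) and the comparator
   load further down becomes "ldr r0, [r2, #0x80]!", leaving r2 pointing
   at the entry's comparator field as the addend loads in
   tcg_out_qemu_ld/st expect. */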
1168
1169 tcg_out_dat_imm(s, COND_AL, ARITH_AND,
1170 TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
1171 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
1172 TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
1173
1174 /* Load the tlb comparator. Use ldrd if needed and available,
1175 but due to how the pointer needs setting up, ldm isn't useful.
1176 Base arm5 doesn't have ldrd, but armv5te does. */
1177 if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
1178 tcg_out_memop_8(s, COND_AL, INSN_LDRD_IMM, TCG_REG_R0,
1179 TCG_REG_R2, tlb_offset, 1, 1);
1180 } else {
1181 tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R0,
1182 TCG_REG_R2, tlb_offset, 1, 1);
1183 if (TARGET_LONG_BITS == 64) {
1184 tcg_out_memop_12(s, COND_AL, INSN_LDR_IMM, TCG_REG_R1,
1185 TCG_REG_R2, 4, 1, 0);
1186 }
1187 }
1188
1189 /* Check alignment. */
1190 if (s_bits) {
1191 tcg_out_dat_imm(s, COND_AL, ARITH_TST,
1192 0, addrlo, (1 << s_bits) - 1);
1193 }
1194
1195 tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
1196 TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
1197
1198 if (TARGET_LONG_BITS == 64) {
1199 tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
1200 TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
1201 }
1202 }
1203
1204 /* Record the context of a call to the out of line helper code for the slow
1205 path for a load or store, so that we can later generate the correct
1206 helper code. */
1207 static void add_qemu_ldst_label(TCGContext *s, int is_ld, int opc,
1208 int data_reg, int data_reg2, int addrlo_reg,
1209 int addrhi_reg, int mem_index,
1210 uint8_t *raddr, uint8_t *label_ptr)
1211 {
1212 int idx;
1213 TCGLabelQemuLdst *label;
1214
1215 if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
1216 tcg_abort();
1217 }
1218
1219 idx = s->nb_qemu_ldst_labels++;
1220 label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
1221 label->is_ld = is_ld;
1222 label->opc = opc;
1223 label->datalo_reg = data_reg;
1224 label->datahi_reg = data_reg2;
1225 label->addrlo_reg = addrlo_reg;
1226 label->addrhi_reg = addrhi_reg;
1227 label->mem_index = mem_index;
1228 label->raddr = raddr;
1229 label->label_ptr[0] = label_ptr;
1230 }
1231
1232 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1233 {
1234 TCGReg argreg, data_reg, data_reg2;
1235 uint8_t *start;
1236
1237 reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
1238
1239 argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
1240 if (TARGET_LONG_BITS == 64) {
1241 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1242 } else {
1243 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1244 }
1245 argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
1246 tcg_out_call(s, (tcg_target_long) qemu_ld_helpers[lb->opc & 3]);
1247
1248 data_reg = lb->datalo_reg;
1249 data_reg2 = lb->datahi_reg;
1250
1251 start = s->code_ptr;
1252 switch (lb->opc) {
1253 case 0 | 4:
1254 tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
1255 break;
1256 case 1 | 4:
1257 tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
1258 break;
1259 case 0:
1260 case 1:
1261 case 2:
1262 default:
1263 tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
1264 break;
1265 case 3:
1266 tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
1267 tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
1268 break;
1269 }
1270
1271 /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
1272 the call and the branch back to straight-line code. Note that the
1273 moves above could be elided by register allocation, and we do not know
1274 in advance which code alternative was chosen for the extension. */
1275 switch (s->code_ptr - start) {
1276 case 0:
1277 tcg_out_nop(s);
1278 /* FALLTHRU */
1279 case 4:
1280 tcg_out_nop(s);
1281 /* FALLTHRU */
1282 case 8:
1283 break;
1284 default:
1285 abort();
1286 }
1287
1288 tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
1289 }
1290
1291 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1292 {
1293 TCGReg argreg, data_reg, data_reg2;
1294
1295 reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
1296
1297 argreg = TCG_REG_R0;
1298 argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
1299 if (TARGET_LONG_BITS == 64) {
1300 argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
1301 } else {
1302 argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
1303 }
1304
1305 data_reg = lb->datalo_reg;
1306 data_reg2 = lb->datahi_reg;
1307 switch (lb->opc) {
1308 case 0:
1309 argreg = tcg_out_arg_reg8(s, argreg, data_reg);
1310 break;
1311 case 1:
1312 argreg = tcg_out_arg_reg16(s, argreg, data_reg);
1313 break;
1314 case 2:
1315 argreg = tcg_out_arg_reg32(s, argreg, data_reg);
1316 break;
1317 case 3:
1318 argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
1319 break;
1320 }
1321
1322 argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
1323 tcg_out_call(s, (tcg_target_long) qemu_st_helpers[lb->opc & 3]);
1324
1325 /* For GETPC_LDST in exec-all.h, we architect exactly 2 insns between
1326 the call and the branch back to straight-line code. */
1327 tcg_out_nop(s);
1328 tcg_out_nop(s);
1329 tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
1330 }
1331 #endif /* SOFTMMU */
1332
1333 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
1334 {
1335 TCGReg addr_reg, data_reg, data_reg2;
1336 bool bswap;
1337 #ifdef CONFIG_SOFTMMU
1338 int mem_index, s_bits;
1339 TCGReg addr_reg2;
1340 uint8_t *label_ptr;
1341 #endif
1342 #ifdef TARGET_WORDS_BIGENDIAN
1343 bswap = 1;
1344 #else
1345 bswap = 0;
1346 #endif
1347
1348 data_reg = *args++;
1349 data_reg2 = (opc == 3 ? *args++ : 0);
1350 addr_reg = *args++;
1351 #ifdef CONFIG_SOFTMMU
1352 addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1353 mem_index = *args;
1354 s_bits = opc & 3;
1355
1356 tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
1357 offsetof(CPUArchState, tlb_table[mem_index][0].addr_read));
1358
1359 label_ptr = s->code_ptr;
1360 tcg_out_b_noaddr(s, COND_NE);
1361
1362 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
1363 offsetof(CPUTLBEntry, addend)
1364 - offsetof(CPUTLBEntry, addr_read));
1365
1366 switch (opc) {
1367 case 0:
1368 tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1369 break;
1370 case 0 | 4:
1371 tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1372 break;
1373 case 1:
1374 tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1375 if (bswap) {
1376 tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1377 }
1378 break;
1379 case 1 | 4:
1380 if (bswap) {
1381 tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1382 tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1383 } else {
1384 tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1385 }
1386 break;
1387 case 2:
1388 default:
1389 tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1390 if (bswap) {
1391 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1392 }
1393 break;
1394 case 3:
1395 if (bswap) {
1396 tcg_out_ld32_rwb(s, COND_AL, data_reg2, TCG_REG_R1, addr_reg);
1397 tcg_out_ld32_12(s, COND_AL, data_reg, TCG_REG_R1, 4);
1398 tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
1399 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1400 } else {
1401 tcg_out_ld32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
1402 tcg_out_ld32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
1403 }
1404 break;
1405 }
1406
1407 add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2,
1408 mem_index, s->code_ptr, label_ptr);
1409 #else /* !CONFIG_SOFTMMU */
1410 if (GUEST_BASE) {
1411 uint32_t offset = GUEST_BASE;
1412 int i, rot;
1413
1414 while (offset) {
1415 i = ctz32(offset) & ~1;
1416 rot = ((32 - i) << 7) & 0xf00;
1417
1418 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
1419 ((offset >> i) & 0xff) | rot);
1420 addr_reg = TCG_REG_TMP;
1421 offset &= ~(0xff << i);
1422 }
1423 }
1424 switch (opc) {
1425 case 0:
1426 tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
1427 break;
1428 case 0 | 4:
1429 tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
1430 break;
1431 case 1:
1432 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1433 if (bswap) {
1434 tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
1435 }
1436 break;
1437 case 1 | 4:
1438 if (bswap) {
1439 tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
1440 tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
1441 } else {
1442 tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
1443 }
1444 break;
1445 case 2:
1446 default:
1447 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
1448 if (bswap) {
1449 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1450 }
1451 break;
1452 case 3:
1453 /* TODO: use block load -
1454 * check that data_reg2 > data_reg or the other way round */
1455 if (data_reg == addr_reg) {
1456 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1457 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1458 } else {
1459 tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
1460 tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
1461 }
1462 if (bswap) {
1463 tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
1464 tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
1465 }
1466 break;
1467 }
1468 #endif
1469 }
1470
1471 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1472 {
1473 TCGReg addr_reg, data_reg, data_reg2;
1474 bool bswap;
1475 #ifdef CONFIG_SOFTMMU
1476 int mem_index, s_bits;
1477 TCGReg addr_reg2;
1478 uint8_t *label_ptr;
1479 #endif
1480 #ifdef TARGET_WORDS_BIGENDIAN
1481 bswap = 1;
1482 #else
1483 bswap = 0;
1484 #endif
1485
1486 data_reg = *args++;
1487 data_reg2 = (opc == 3 ? *args++ : 0);
1488 addr_reg = *args++;
1489 #ifdef CONFIG_SOFTMMU
1490 addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1491 mem_index = *args;
1492 s_bits = opc & 3;
1493
1494 tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits,
1495 offsetof(CPUArchState,
1496 tlb_table[mem_index][0].addr_write));
1497
1498 label_ptr = s->code_ptr;
1499 tcg_out_b_noaddr(s, COND_NE);
1500
1501 tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2,
1502 offsetof(CPUTLBEntry, addend)
1503 - offsetof(CPUTLBEntry, addr_write));
1504
1505 switch (opc) {
1506 case 0:
1507 tcg_out_st8_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1508 break;
1509 case 1:
1510 if (bswap) {
1511 tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
1512 tcg_out_st16_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
1513 } else {
1514 tcg_out_st16_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1515 }
1516 break;
1517 case 2:
1518 default:
1519 if (bswap) {
1520 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1521 tcg_out_st32_r(s, COND_AL, TCG_REG_R0, addr_reg, TCG_REG_R1);
1522 } else {
1523 tcg_out_st32_r(s, COND_AL, data_reg, addr_reg, TCG_REG_R1);
1524 }
1525 break;
1526 case 3:
1527 if (bswap) {
1528 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1529 tcg_out_st32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R1, addr_reg);
1530 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1531 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R1, 4);
1532 } else {
1533 tcg_out_st32_rwb(s, COND_AL, data_reg, TCG_REG_R1, addr_reg);
1534 tcg_out_st32_12(s, COND_AL, data_reg2, TCG_REG_R1, 4);
1535 }
1536 break;
1537 }
1538
1539 add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2,
1540 mem_index, s->code_ptr, label_ptr);
1541 #else /* !CONFIG_SOFTMMU */
1542 if (GUEST_BASE) {
1543 uint32_t offset = GUEST_BASE;
1544 int i;
1545 int rot;
1546
1547 while (offset) {
1548 i = ctz32(offset) & ~1;
1549 rot = ((32 - i) << 7) & 0xf00;
1550
1551 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
1552 ((offset >> i) & 0xff) | rot);
1553 addr_reg = TCG_REG_R1;
1554 offset &= ~(0xff << i);
1555 }
1556 }
1557 switch (opc) {
1558 case 0:
1559 tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
1560 break;
1561 case 1:
1562 if (bswap) {
1563 tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
1564 tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1565 } else {
1566 tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
1567 }
1568 break;
1569 case 2:
1570 default:
1571 if (bswap) {
1572 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1573 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1574 } else {
1575 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1576 }
1577 break;
1578 case 3:
1579 /* TODO: use block store -
1580 * check that data_reg2 > data_reg or the other way round */
1581 if (bswap) {
1582 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
1583 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
1584 tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
1585 tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
1586 } else {
1587 tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
1588 tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
1589 }
1590 break;
1591 }
1592 #endif
1593 }
1594
1595 static uint8_t *tb_ret_addr;
1596
1597 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
1598 const TCGArg *args, const int *const_args)
1599 {
1600 TCGArg a0, a1, a2, a3, a4, a5;
1601 int c;
1602
1603 switch (opc) {
1604 case INDEX_op_exit_tb:
1605 if (use_armv7_instructions || check_fit_imm(args[0])) {
1606 tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
1607 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1608 } else {
1609 uint8_t *ld_ptr = s->code_ptr;
1610 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1611 tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
1612 *ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
1613 tcg_out32(s, args[0]);
1614 }
1615 break;
1616 case INDEX_op_goto_tb:
1617 if (s->tb_jmp_offset) {
1618 /* Direct jump method */
1619 #if defined(USE_DIRECT_JUMP)
1620 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1621 tcg_out_b_noaddr(s, COND_AL);
1622 #else
1623 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
1624 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1625 tcg_out32(s, 0);
1626 #endif
1627 } else {
1628 /* Indirect jump method */
1629 #if 1
1630 c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
1631 if (c > 0xfff || c < -0xfff) {
1632 tcg_out_movi32(s, COND_AL, TCG_REG_R0,
1633 (tcg_target_long) (s->tb_next + args[0]));
1634 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1635 } else
1636 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
1637 #else
1638 tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
1639 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
1640 tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
1641 #endif
1642 }
1643 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1644 break;
1645 case INDEX_op_call:
1646 if (const_args[0])
1647 tcg_out_call(s, args[0]);
1648 else
1649 tcg_out_callr(s, COND_AL, args[0]);
1650 break;
1651 case INDEX_op_br:
1652 tcg_out_goto_label(s, COND_AL, args[0]);
1653 break;
1654
1655 case INDEX_op_ld8u_i32:
1656 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
1657 break;
1658 case INDEX_op_ld8s_i32:
1659 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
1660 break;
1661 case INDEX_op_ld16u_i32:
1662 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
1663 break;
1664 case INDEX_op_ld16s_i32:
1665 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
1666 break;
1667 case INDEX_op_ld_i32:
1668 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
1669 break;
1670 case INDEX_op_st8_i32:
1671 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
1672 break;
1673 case INDEX_op_st16_i32:
1674 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
1675 break;
1676 case INDEX_op_st_i32:
1677 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
1678 break;
1679
1680 case INDEX_op_mov_i32:
1681 tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
1682 args[0], 0, args[1], SHIFT_IMM_LSL(0));
1683 break;
1684 case INDEX_op_movi_i32:
1685 tcg_out_movi32(s, COND_AL, args[0], args[1]);
1686 break;
1687 case INDEX_op_movcond_i32:
1688 /* Constraints mean that v2 is always in the same register as dest,
1689 * so we only need to do "if condition passed, move v1 to dest".
1690 */
1691 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1692 args[1], args[2], const_args[2]);
1693 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
1694 ARITH_MVN, args[0], 0, args[3], const_args[3]);
1695 break;
1696 case INDEX_op_add_i32:
1697 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
1698 args[0], args[1], args[2], const_args[2]);
1699 break;
1700 case INDEX_op_sub_i32:
1701 if (const_args[1]) {
1702 if (const_args[2]) {
1703 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
1704 } else {
1705 tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
1706 args[0], args[2], args[1], 1);
1707 }
1708 } else {
1709 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
1710 args[0], args[1], args[2], const_args[2]);
1711 }
1712 break;
1713 case INDEX_op_and_i32:
1714 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
1715 args[0], args[1], args[2], const_args[2]);
1716 break;
1717 case INDEX_op_andc_i32:
1718 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
1719 args[0], args[1], args[2], const_args[2]);
1720 break;
1721 case INDEX_op_or_i32:
1722 c = ARITH_ORR;
1723 goto gen_arith;
1724 case INDEX_op_xor_i32:
1725 c = ARITH_EOR;
1726 /* Fall through. */
1727 gen_arith:
1728 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
1729 break;
1730 case INDEX_op_add2_i32:
1731 a0 = args[0], a1 = args[1], a2 = args[2];
1732 a3 = args[3], a4 = args[4], a5 = args[5];
1733 if (a0 == a3 || (a0 == a5 && !const_args[5])) {
1734 a0 = TCG_REG_TMP;
1735 }
1736 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
1737 a0, a2, a4, const_args[4]);
1738 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
1739 a1, a3, a5, const_args[5]);
1740 tcg_out_mov_reg(s, COND_AL, args[0], a0);
1741 break;
1742 case INDEX_op_sub2_i32:
1743 a0 = args[0], a1 = args[1], a2 = args[2];
1744 a3 = args[3], a4 = args[4], a5 = args[5];
1745 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
1746 a0 = TCG_REG_TMP;
1747 }
1748 if (const_args[2]) {
1749 if (const_args[4]) {
1750 tcg_out_movi32(s, COND_AL, a0, a4);
1751 a4 = a0;
1752 }
1753 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
1754 } else {
1755 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
1756 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
1757 }
1758 if (const_args[3]) {
1759 if (const_args[5]) {
1760 tcg_out_movi32(s, COND_AL, a1, a5);
1761 a5 = a1;
1762 }
1763 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
1764 } else {
1765 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
1766 a1, a3, a5, const_args[5]);
1767 }
1768 tcg_out_mov_reg(s, COND_AL, args[0], a0);
1769 break;
1770 case INDEX_op_neg_i32:
1771 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
1772 break;
1773 case INDEX_op_not_i32:
1774 tcg_out_dat_reg(s, COND_AL,
1775 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
1776 break;
1777 case INDEX_op_mul_i32:
1778 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
1779 break;
1780 case INDEX_op_mulu2_i32:
1781 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1782 break;
1783 case INDEX_op_muls2_i32:
1784 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
1785 break;
1786 /* XXX: Perhaps args[2] & 0x1f is wrong */
1787 case INDEX_op_shl_i32:
1788 c = const_args[2] ?
1789 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
1790 goto gen_shift32;
1791 case INDEX_op_shr_i32:
1792 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
1793 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
1794 goto gen_shift32;
1795 case INDEX_op_sar_i32:
1796 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
1797 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
1798 goto gen_shift32;
1799 case INDEX_op_rotr_i32:
1800 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
1801 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
1802 /* Fall through. */
1803 gen_shift32:
1804 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
1805 break;
1806
1807 case INDEX_op_rotl_i32:
1808 if (const_args[2]) {
1809 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1810 ((0x20 - args[2]) & 0x1f) ?
1811 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
1812 SHIFT_IMM_LSL(0));
1813 } else {
1814 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[1], 0x20);
1815 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
1816 SHIFT_REG_ROR(TCG_REG_TMP));
1817 }
1818 break;
1819
1820 case INDEX_op_brcond_i32:
1821 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1822 args[0], args[1], const_args[1]);
1823 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
1824 break;
1825 case INDEX_op_brcond2_i32:
1826 /* The resulting conditions are:
1827 * TCG_COND_EQ --> a0 == a2 && a1 == a3,
1828 * TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
1829 * TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
1830 * TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
1831 * TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
1832 * TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
1833 */
1834 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1835 args[1], args[3], const_args[3]);
1836 tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
1837 args[0], args[2], const_args[2]);
1838 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
1839 break;
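/* setcond is a compare followed by two conditional moves: the destination
 * is set to 1 under the requested condition and to 0 under its inverse,
 * so exactly one of the two MOVs takes effect. */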
1840 case INDEX_op_setcond_i32:
1841 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1842 args[1], args[2], const_args[2]);
1843 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
1844 ARITH_MOV, args[0], 0, 1);
1845 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
1846 ARITH_MOV, args[0], 0, 0);
1847 break;
1848 case INDEX_op_setcond2_i32:
1849 /* See brcond2_i32 comment */
1850 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
1851 args[2], args[4], const_args[4]);
1852 tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
1853 args[1], args[3], const_args[3]);
1854 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
1855 ARITH_MOV, args[0], 0, 1);
1856 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
1857 ARITH_MOV, args[0], 0, 0);
1858 break;
1859
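/* The constant passed to tcg_out_qemu_ld/st below encodes the access:
 * the low two bits give log2 of the size (0..3 for 8..64 bits) and, for
 * loads, the "| 4" bit requests sign-extension of the result. */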
1860 case INDEX_op_qemu_ld8u:
1861 tcg_out_qemu_ld(s, args, 0);
1862 break;
1863 case INDEX_op_qemu_ld8s:
1864 tcg_out_qemu_ld(s, args, 0 | 4);
1865 break;
1866 case INDEX_op_qemu_ld16u:
1867 tcg_out_qemu_ld(s, args, 1);
1868 break;
1869 case INDEX_op_qemu_ld16s:
1870 tcg_out_qemu_ld(s, args, 1 | 4);
1871 break;
1872 case INDEX_op_qemu_ld32:
1873 tcg_out_qemu_ld(s, args, 2);
1874 break;
1875 case INDEX_op_qemu_ld64:
1876 tcg_out_qemu_ld(s, args, 3);
1877 break;
1878
1879 case INDEX_op_qemu_st8:
1880 tcg_out_qemu_st(s, args, 0);
1881 break;
1882 case INDEX_op_qemu_st16:
1883 tcg_out_qemu_st(s, args, 1);
1884 break;
1885 case INDEX_op_qemu_st32:
1886 tcg_out_qemu_st(s, args, 2);
1887 break;
1888 case INDEX_op_qemu_st64:
1889 tcg_out_qemu_st(s, args, 3);
1890 break;
1891
1892 case INDEX_op_bswap16_i32:
1893 tcg_out_bswap16(s, COND_AL, args[0], args[1]);
1894 break;
1895 case INDEX_op_bswap32_i32:
1896 tcg_out_bswap32(s, COND_AL, args[0], args[1]);
1897 break;
1898
1899 case INDEX_op_ext8s_i32:
1900 tcg_out_ext8s(s, COND_AL, args[0], args[1]);
1901 break;
1902 case INDEX_op_ext16s_i32:
1903 tcg_out_ext16s(s, COND_AL, args[0], args[1]);
1904 break;
1905 case INDEX_op_ext16u_i32:
1906 tcg_out_ext16u(s, COND_AL, args[0], args[1]);
1907 break;
1908
1909 case INDEX_op_deposit_i32:
1910 tcg_out_deposit(s, COND_AL, args[0], args[2],
1911 args[3], args[4], const_args[2]);
1912 break;
1913
1914 case INDEX_op_div_i32:
1915 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
1916 break;
1917 case INDEX_op_divu_i32:
1918 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
1919 break;
1920
1921 default:
1922 tcg_abort();
1923 }
1924 }
1925
1926 #ifdef CONFIG_SOFTMMU
1927 /* Generate TB finalization at the end of block. */
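/* Each entry in qemu_ldst_labels was recorded while emitting a fast-path
 * qemu_ld/st; here the corresponding out-of-line slow path (the helper
 * call taken on a TLB miss) is appended after the body of the TB. */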
1928 void tcg_out_tb_finalize(TCGContext *s)
1929 {
1930 int i;
1931 for (i = 0; i < s->nb_qemu_ldst_labels; i++) {
1932 TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
1933 if (label->is_ld) {
1934 tcg_out_qemu_ld_slow_path(s, label);
1935 } else {
1936 tcg_out_qemu_st_slow_path(s, label);
1937 }
1938 }
1939 }
1940 #endif /* CONFIG_SOFTMMU */
1941
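/* Constraint letters used below, roughly as parsed elsewhere in this file:
 * "r" is any core register, "I" an encodable ARM immediate, "K" an
 * immediate whose bitwise inverse is encodable, "N" one whose negation is
 * encodable, and "l"/"L"/"s" restrict qemu_ld/st operands to registers
 * that are not clobbered by the softmmu TLB lookup and slow-path call. */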
1942 static const TCGTargetOpDef arm_op_defs[] = {
1943 { INDEX_op_exit_tb, { } },
1944 { INDEX_op_goto_tb, { } },
1945 { INDEX_op_call, { "ri" } },
1946 { INDEX_op_br, { } },
1947
1948 { INDEX_op_mov_i32, { "r", "r" } },
1949 { INDEX_op_movi_i32, { "r" } },
1950
1951 { INDEX_op_ld8u_i32, { "r", "r" } },
1952 { INDEX_op_ld8s_i32, { "r", "r" } },
1953 { INDEX_op_ld16u_i32, { "r", "r" } },
1954 { INDEX_op_ld16s_i32, { "r", "r" } },
1955 { INDEX_op_ld_i32, { "r", "r" } },
1956 { INDEX_op_st8_i32, { "r", "r" } },
1957 { INDEX_op_st16_i32, { "r", "r" } },
1958 { INDEX_op_st_i32, { "r", "r" } },
1959
1960 /* TODO: "r", "r", "ri" */
1961 { INDEX_op_add_i32, { "r", "r", "rIN" } },
1962 { INDEX_op_sub_i32, { "r", "rI", "rIN" } },
1963 { INDEX_op_mul_i32, { "r", "r", "r" } },
1964 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1965 { INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
1966 { INDEX_op_and_i32, { "r", "r", "rIK" } },
1967 { INDEX_op_andc_i32, { "r", "r", "rIK" } },
1968 { INDEX_op_or_i32, { "r", "r", "rI" } },
1969 { INDEX_op_xor_i32, { "r", "r", "rI" } },
1970 { INDEX_op_neg_i32, { "r", "r" } },
1971 { INDEX_op_not_i32, { "r", "r" } },
1972
1973 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1974 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1975 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1976 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1977 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1978
1979 { INDEX_op_brcond_i32, { "r", "rIN" } },
1980 { INDEX_op_setcond_i32, { "r", "r", "rIN" } },
1981 { INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },
1982
1983 { INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
1984 { INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
1985 { INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
1986 { INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },
1987
1988 #if TARGET_LONG_BITS == 32
1989 { INDEX_op_qemu_ld8u, { "r", "l" } },
1990 { INDEX_op_qemu_ld8s, { "r", "l" } },
1991 { INDEX_op_qemu_ld16u, { "r", "l" } },
1992 { INDEX_op_qemu_ld16s, { "r", "l" } },
1993 { INDEX_op_qemu_ld32, { "r", "l" } },
1994 { INDEX_op_qemu_ld64, { "L", "L", "l" } },
1995
1996 { INDEX_op_qemu_st8, { "s", "s" } },
1997 { INDEX_op_qemu_st16, { "s", "s" } },
1998 { INDEX_op_qemu_st32, { "s", "s" } },
1999 { INDEX_op_qemu_st64, { "s", "s", "s" } },
2000 #else
2001 { INDEX_op_qemu_ld8u, { "r", "l", "l" } },
2002 { INDEX_op_qemu_ld8s, { "r", "l", "l" } },
2003 { INDEX_op_qemu_ld16u, { "r", "l", "l" } },
2004 { INDEX_op_qemu_ld16s, { "r", "l", "l" } },
2005 { INDEX_op_qemu_ld32, { "r", "l", "l" } },
2006 { INDEX_op_qemu_ld64, { "L", "L", "l", "l" } },
2007
2008 { INDEX_op_qemu_st8, { "s", "s", "s" } },
2009 { INDEX_op_qemu_st16, { "s", "s", "s" } },
2010 { INDEX_op_qemu_st32, { "s", "s", "s" } },
2011 { INDEX_op_qemu_st64, { "s", "s", "s", "s" } },
2012 #endif
2013
2014 { INDEX_op_bswap16_i32, { "r", "r" } },
2015 { INDEX_op_bswap32_i32, { "r", "r" } },
2016
2017 { INDEX_op_ext8s_i32, { "r", "r" } },
2018 { INDEX_op_ext16s_i32, { "r", "r" } },
2019 { INDEX_op_ext16u_i32, { "r", "r" } },
2020
2021 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
2022
2023 { INDEX_op_div_i32, { "r", "r", "r" } },
2024 { INDEX_op_divu_i32, { "r", "r", "r" } },
2025
2026 { -1 },
2027 };
2028
2029 static void tcg_target_init(TCGContext *s)
2030 {
2031 #if defined(CONFIG_GETAUXVAL)
2032 /* Only probe for the platform and capabilities if we haven't already
2033 determined maximum values at compile time. */
2034 # if !defined(use_idiv_instructions)
2035 {
2036 unsigned long hwcap = getauxval(AT_HWCAP);
2037 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2038 }
2039 # endif
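/* AT_PLATFORM on ARM Linux is a string such as "v7l", "v6l" or "v5tel";
 * the digit after the leading 'v' gives the architecture revision. */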
2040 if (__ARM_ARCH < 7) {
2041 const char *pl = (const char *)getauxval(AT_PLATFORM);
2042 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2043 arm_arch = pl[1] - '0';
2044 }
2045 }
2046 #endif /* CONFIG_GETAUXVAL */
2047
2048 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
2049 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
2050 (1 << TCG_REG_R0) |
2051 (1 << TCG_REG_R1) |
2052 (1 << TCG_REG_R2) |
2053 (1 << TCG_REG_R3) |
2054 (1 << TCG_REG_R12) |
2055 (1 << TCG_REG_R14));
2056
2057 tcg_regset_clear(s->reserved_regs);
2058 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2059 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2060 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2061
2062 tcg_add_target_add_op_defs(arm_op_defs);
2063 }
2064
2065 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2066 TCGReg arg1, intptr_t arg2)
2067 {
2068 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2069 }
2070
2071 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2072 TCGReg arg1, intptr_t arg2)
2073 {
2074 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2075 }
2076
2077 static inline void tcg_out_mov(TCGContext *s, TCGType type,
2078 TCGReg ret, TCGReg arg)
2079 {
2080 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
2081 }
2082
2083 static inline void tcg_out_movi(TCGContext *s, TCGType type,
2084 TCGReg ret, tcg_target_long arg)
2085 {
2086 tcg_out_movi32(s, COND_AL, ret, arg);
2087 }
2088
2089 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
2090 and tcg_register_jit. */
2091
2092 #define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
2093
2094 #define FRAME_SIZE \
2095 ((PUSH_SIZE \
2096 + TCG_STATIC_CALL_ARGS_SIZE \
2097 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
2098 + TCG_TARGET_STACK_ALIGN - 1) \
2099 & -TCG_TARGET_STACK_ALIGN)
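/* For example, with 4-byte registers PUSH_SIZE is (8 + 1) * 4 = 36 bytes
 * (r4-r11 plus lr), and FRAME_SIZE rounds the push area plus the static
 * call-argument area and the TCG temp buffer up to the stack alignment. */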
2100
2101 static void tcg_target_qemu_prologue(TCGContext *s)
2102 {
2103 int stack_addend;
2104
2105 /* Calling convention requires us to save r4-r11 and lr. */
2106 /* stmdb sp!, { r4 - r11, lr } */
2107 tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
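/* The register list 0x4ff0 selects r4-r11 and lr; the matching epilogue
   ldmia below uses 0x8ff0, restoring r4-r11 and loading pc to return. */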
2108
2109 /* Reserve callee argument and tcg temp space. */
2110 stack_addend = FRAME_SIZE - PUSH_SIZE;
2111
2112 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
2113 TCG_REG_CALL_STACK, stack_addend, 1);
2114 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
2115 CPU_TEMP_BUF_NLONGS * sizeof(long));
2116
2117 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2118
2119 tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
2120 tb_ret_addr = s->code_ptr;
2121
2122 /* Epilogue. We branch here via tb_ret_addr. */
2123 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
2124 TCG_REG_CALL_STACK, stack_addend, 1);
2125
2126 /* ldmia sp!, { r4 - r11, pc } */
2127 tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
2128 }
2129
2130 typedef struct {
2131 DebugFrameCIE cie;
2132 DebugFrameFDEHeader fde;
2133 uint8_t fde_def_cfa[4];
2134 uint8_t fde_reg_ofs[18];
2135 } DebugFrame;
2136
2137 #define ELF_HOST_MACHINE EM_ARM
2138
2139 /* We're expecting a 2-byte uleb128 encoded value. */
2140 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
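/* A two-byte uleb128 carries 14 payload bits (7 per byte), so FRAME_SIZE
   must stay below 1 << 14 for the fde_def_cfa encoding below to be valid. */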
2141
2142 static DebugFrame debug_frame = {
2143 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2144 .cie.id = -1,
2145 .cie.version = 1,
2146 .cie.code_align = 1,
2147 .cie.data_align = 0x7c, /* sleb128 -4 */
2148 .cie.return_column = 14,
2149
2150 /* Total FDE size does not include the "len" member. */
2151 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
2152
2153 .fde_def_cfa = {
2154 12, 13, /* DW_CFA_def_cfa sp, ... */
2155 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2156 (FRAME_SIZE >> 7)
2157 },
2158 .fde_reg_ofs = {
2159 /* The following must match the stmdb in the prologue. */
2160 0x8e, 1, /* DW_CFA_offset, lr, -4 */
2161 0x8b, 2, /* DW_CFA_offset, r11, -8 */
2162 0x8a, 3, /* DW_CFA_offset, r10, -12 */
2163 0x89, 4, /* DW_CFA_offset, r9, -16 */
2164 0x88, 5, /* DW_CFA_offset, r8, -20 */
2165 0x87, 6, /* DW_CFA_offset, r7, -24 */
2166 0x86, 7, /* DW_CFA_offset, r6, -28 */
2167 0x85, 8, /* DW_CFA_offset, r5, -32 */
2168 0x84, 9, /* DW_CFA_offset, r4, -36 */
2169 }
2170 };
2171
2172 void tcg_register_jit(void *buf, size_t buf_size)
2173 {
2174 debug_frame.fde.func_start = (tcg_target_long) buf;
2175 debug_frame.fde.func_len = buf_size;
2176
2177 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2178 }