/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "elf.h"
#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"

int arm_arch = __ARM_ARCH;

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifndef use_neon_instructions
bool use_neon_instructions;
#endif

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%sp", "%r14", "%pc",
    "%q0", "%q1", "%q2", "%q3", "%q4", "%q5", "%q6", "%q7",
    "%q8", "%q9", "%q10", "%q11", "%q12", "%q13", "%q14", "%q15",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,

    TCG_REG_Q0,
    TCG_REG_Q1,
    TCG_REG_Q2,
    TCG_REG_Q3,
    /* Q4 - Q7 are call-saved, and skipped. */
    TCG_REG_Q8,
    TCG_REG_Q9,
    TCG_REG_Q10,
    TCG_REG_Q11,
    TCG_REG_Q12,
    TCG_REG_Q13,
    TCG_REG_Q14,
    TCG_REG_Q15,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_R0 + slot;
}

#define TCG_REG_TMP  TCG_REG_R12
#define TCG_VEC_TMP  TCG_REG_Q15
#ifndef CONFIG_SOFTMMU
#define TCG_REG_GUEST_BASE  TCG_REG_R11
#endif

typedef enum {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
} ARMCond;

#define TO_CPSR (1 << 20)

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_B         = 0x0a000000,

    INSN_CLZ       = 0x016f0f10,
    INSN_RBIT      = 0x06ff0f30,

    INSN_LDMIA     = 0x08b00000,
    INSN_STMDB     = 0x09200000,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,

    INSN_DMB_ISH   = 0xf57ff05b,
    INSN_DMB_MCR   = 0xee070fba,

    /* Architected nop introduced in v6k.  */
    /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
       also Just So Happened to do nothing on pre-v6k so that we
       don't need to conditionalize it?  */
    INSN_NOP_v6k   = 0xe320f000,
    /* Otherwise the assembler uses mov r0,r0 */
    INSN_NOP_v4    = (COND_AL << 28) | ARITH_MOV,

    INSN_VADD      = 0xf2000800,
    INSN_VAND      = 0xf2000110,
    INSN_VBIC      = 0xf2100110,
    INSN_VEOR      = 0xf3000110,
    INSN_VORN      = 0xf2300110,
    INSN_VORR      = 0xf2200110,
    INSN_VSUB      = 0xf3000800,
    INSN_VMUL      = 0xf2000910,
    INSN_VQADD     = 0xf2000010,
    INSN_VQADD_U   = 0xf3000010,
    INSN_VQSUB     = 0xf2000210,
    INSN_VQSUB_U   = 0xf3000210,
    INSN_VMAX      = 0xf2000600,
    INSN_VMAX_U    = 0xf3000600,
    INSN_VMIN      = 0xf2000610,
    INSN_VMIN_U    = 0xf3000610,

    INSN_VABS      = 0xf3b10300,
    INSN_VMVN      = 0xf3b00580,
    INSN_VNEG      = 0xf3b10380,

    INSN_VCEQ0     = 0xf3b10100,
    INSN_VCGT0     = 0xf3b10000,
    INSN_VCGE0     = 0xf3b10080,
    INSN_VCLE0     = 0xf3b10180,
    INSN_VCLT0     = 0xf3b10200,

    INSN_VCEQ      = 0xf3000810,
    INSN_VCGE      = 0xf2000310,
    INSN_VCGT      = 0xf2000300,
    INSN_VCGE_U    = 0xf3000310,
    INSN_VCGT_U    = 0xf3000300,

    INSN_VSHLI     = 0xf2800510,  /* VSHL (immediate) */
    INSN_VSARI     = 0xf2800010,  /* VSHR.S */
    INSN_VSHRI     = 0xf3800010,  /* VSHR.U */
    INSN_VSLI      = 0xf3800510,
    INSN_VSHL_S    = 0xf2000400,  /* VSHL.S (register) */
    INSN_VSHL_U    = 0xf3000400,  /* VSHL.U (register) */

    INSN_VBSL      = 0xf3100110,
    INSN_VBIT      = 0xf3200110,
    INSN_VBIF      = 0xf3300110,

    INSN_VTST      = 0xf2000810,

    INSN_VDUP_G    = 0xee800b10,  /* VDUP (ARM core register) */
    INSN_VDUP_S    = 0xf3b00c00,  /* VDUP (scalar) */
    INSN_VLDR_D    = 0xed100b00,  /* VLDR.64 */
    INSN_VLD1      = 0xf4200000,  /* VLD1 (multiple single elements) */
    INSN_VLD1R     = 0xf4a00c00,  /* VLD1 (single element to all lanes) */
    INSN_VST1      = 0xf4000000,  /* VST1 (multiple single elements) */
    INSN_VMOVI     = 0xf2800010,  /* VMOV (immediate) */
} ARMInsn;

#define INSN_NOP (use_armv7_instructions ? INSN_NOP_v6k : INSN_NOP_v4)

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};

static int encode_imm(uint32_t imm);

/* TCG private relocation type: add with pc+imm8 */
#define R_ARM_PC8  11

/* TCG private relocation type: vldr with imm8 << 2 */
#define R_ARM_PC11 12

static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) >> 2;

    if (offset == sextract32(offset, 0, 24)) {
        *src_rw = deposit32(*src_rw, 0, 24, offset);
        return true;
    }
    return false;
}
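
/*
 * Illustrative example of the reloc_pc24 arithmetic above: the branch
 * offset is relative to the insn address plus 8 (the ARM pipeline bias)
 * and counted in words, so a B at 0x1000 targeting 0x2000 stores
 * (0x2000 - 0x1000 - 8) >> 2 = 0x3fe in the low 24 bits.
 */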

static bool reloc_pc13(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;

    if (offset >= -0xfff && offset <= 0xfff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 12, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc11(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = (tcg_ptr_byte_diff(target, src_rx) - 8) / 4;

    if (offset >= -0xff && offset <= 0xff) {
        tcg_insn_unit insn = *src_rw;
        bool u = (offset >= 0);
        if (!u) {
            offset = -offset;
        }
        insn = deposit32(insn, 23, 1, u);
        insn = deposit32(insn, 0, 8, offset);
        *src_rw = insn;
        return true;
    }
    return false;
}

static bool reloc_pc8(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t offset = tcg_ptr_byte_diff(target, src_rx) - 8;
    int imm12 = encode_imm(offset);

    if (imm12 >= 0) {
        *src_rw = deposit32(*src_rw, 0, 12, imm12);
        return true;
    }
    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_ARM_PC24:
        return reloc_pc24(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC13:
        return reloc_pc13(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC11:
        return reloc_pc11(code_ptr, (const tcg_insn_unit *)value);
    case R_ARM_PC8:
        return reloc_pc8(code_ptr, (const tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000

#define ALL_GENERAL_REGS  0xffffu
#define ALL_VECTOR_REGS   0xffff0000u

/*
 * r0-r2 will be overwritten when reading the tlb entry (softmmu only)
 * and r0-r1 when doing the byte swapping, so don't use these.
 * r3 is removed for softmmu to avoid clashes with helper arguments.
 */
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R3) | \
                          (1 << TCG_REG_R14)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1) | \
                          (1 << TCG_REG_R2) | (1 << TCG_REG_R14) | \
                          ((TARGET_LONG_BITS == 64) << TCG_REG_R3)))
#else
#define ALL_QLOAD_REGS   ALL_GENERAL_REGS
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R0) | (1 << TCG_REG_R1)))
#endif

/*
 * ARM immediates for ALU instructions are made of an unsigned 8-bit
 * value right-rotated by an even amount between 0 and 30.
 *
 * Return < 0 if @imm cannot be encoded, else the entire imm12 field.
 */
static int encode_imm(uint32_t imm)
{
    uint32_t rot, imm8;

    /* Simple case, no rotation required. */
    if ((imm & ~0xff) == 0) {
        return imm;
    }

    /* Next, try a simple even shift. */
    rot = ctz32(imm) & ~1;
    imm8 = imm >> rot;
    rot = 32 - rot;
    if ((imm8 & ~0xff) == 0) {
        goto found;
    }

    /*
     * Finally, try harder with rotations.
     * The ctz test above will have taken care of rotates >= 8.
     */
    for (rot = 2; rot < 8; rot += 2) {
        imm8 = rol32(imm, rot);
        if ((imm8 & ~0xff) == 0) {
            goto found;
        }
    }
    /* Fail: imm cannot be encoded. */
    return -1;

 found:
    /* Note that rot is even, and we discard bit 0 by shifting by 7. */
    return rot << 7 | imm8;
}
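
/*
 * Worked example (illustrative): encode_imm(0x0000ab00) finds an even
 * shift of 8, so imm8 = 0xab with a right-rotation of 24, and returns
 * (24 << 7) | 0xab = 0xcab.  The hardware decodes the rotate field
 * (0xc) as a rotation by 2 * 0xc = 24, recovering 0xab00.
 */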

static int encode_imm_nofail(uint32_t imm)
{
    int ret = encode_imm(imm);
    tcg_debug_assert(ret >= 0);
    return ret;
}

static bool check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Return true if v16 is a valid 16-bit shifted immediate. */
static bool is_shimm16(uint16_t v16, int *cmode, int *imm8)
{
    if (v16 == (v16 & 0xff)) {
        *cmode = 0x8;
        *imm8 = v16 & 0xff;
        return true;
    } else if (v16 == (v16 & 0xff00)) {
        *cmode = 0xa;
        *imm8 = v16 >> 8;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifted immediate. */
static bool is_shimm32(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == (v32 & 0xff)) {
        *cmode = 0x0;
        *imm8 = v32 & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff00)) {
        *cmode = 0x2;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff0000)) {
        *cmode = 0x4;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    } else if (v32 == (v32 & 0xff000000)) {
        *cmode = 0x6;
        *imm8 = v32 >> 24;
        return true;
    }
    return false;
}

/* Return true if v32 is a valid 32-bit shifting ones immediate. */
static bool is_soimm32(uint32_t v32, int *cmode, int *imm8)
{
    if ((v32 & 0xffff00ff) == 0xff) {
        *cmode = 0xc;
        *imm8 = (v32 >> 8) & 0xff;
        return true;
    } else if ((v32 & 0xff00ffff) == 0xffff) {
        *cmode = 0xd;
        *imm8 = (v32 >> 16) & 0xff;
        return true;
    }
    return false;
}
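
/*
 * Illustrative: 0x0000abff matches the first case, giving cmode 0xc
 * with imm8 0xab, i.e. the "shifting ones" value (0xab << 8) | 0xff.
 */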

/*
 * Return non-zero if v32 can be formed by MOVI+ORR.
 * Place the parameters for MOVI in (cmode, imm8).
 * Return the cmode for ORR; the imm8 can be had via extraction from v32.
 */
static int is_shimm32_pair(uint32_t v32, int *cmode, int *imm8)
{
    int i;

    for (i = 6; i > 0; i -= 2) {
        /* Mask out one byte we can add with ORR.  */
        uint32_t tmp = v32 & ~(0xffu << (i * 4));
        if (is_shimm32(tmp, cmode, imm8) ||
            is_soimm32(tmp, cmode, imm8)) {
            break;
        }
    }
    return i;
}
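
/*
 * Illustrative: for v32 = 0x00ff00ff the i = 4 iteration masks out the
 * byte at bits [23:16], leaving 0x000000ff, which MOVI can build with
 * cmode 0 and imm8 0xff; the return value 4 is then the ORR cmode that
 * places the extracted byte back at bits [23:16].
 */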

/* Return true if v32 is a valid 16-bit or 32-bit shifted immediate. */
static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
{
    if (v32 == deposit32(v32, 16, 16, v32)) {
        return is_shimm16(v32, cmode, imm8);
    } else {
        return is_shimm32(v32, cmode, imm8);
    }
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }

    switch (ct & (TCG_CT_CONST_ORRI | TCG_CT_CONST_ANDI)) {
    case 0:
        break;
    case TCG_CT_CONST_ANDI:
        val = ~val;
        /* fallthru */
    case TCG_CT_CONST_ORRI:
        if (val == deposit64(val, 32, 32, val)) {
            int cmode, imm8;
            return is_shimm1632(val, &cmode, &imm8);
        }
        break;
    default:
        /* Both bits should not be set for the same insn. */
        g_assert_not_reached();
    }

    return 0;
}

static void tcg_out_b_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | INSN_B |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_bl_imm(TCGContext *s, ARMCond cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_blx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
                 (((offset - 8) >> 2) & 0x00ffffff));
}

static void tcg_out_dat_reg(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, TCGReg rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                 (rn << 16) | (rd << 12) | shift | rm);
}

static void tcg_out_mov_reg(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case. */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_bx_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static void tcg_out_b_reg(TCGContext *s, ARMCond cond, TCGReg rn)
{
    /*
     * Unless the C portion of QEMU is compiled as thumb, we don't need
     * true BX semantics; merely a branch to an address held in a register.
     */
    tcg_out_bx_reg(s, cond, rn);
}

static void tcg_out_dat_imm(TCGContext *s, ARMCond cond, ARMInsn opc,
                            TCGReg rd, TCGReg rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                 (rn << 16) | (rd << 12) | im);
}

static void tcg_out_ldstm(TCGContext *s, ARMCond cond, ARMInsn opc,
                          TCGReg rn, uint16_t mask)
{
    tcg_out32(s, (cond << 28) | opc | (rn << 16) | mask);
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
                 | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, ARMCond cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
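    /*
     * Addressing mode 3 splits the 8-bit offset into imm4H (bits [11:8])
     * and imm4L (bits [3:0]), with the U bit selecting add vs subtract.
     */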
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
                 (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, ARMCond cond, ARMInsn opc,
                             TCGReg rt, TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
                 (rn << 16) | (rt << 12) | imm12);
}

static void tcg_out_ld32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st32_12(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st32_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ldrd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ldrd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static void __attribute__((unused))
tcg_out_ldrd_rwb(TCGContext *s, ARMCond cond, TCGReg rt, TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_strd_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_strd_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback. */
static void tcg_out_ld32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_st32_rwb(TCGContext *s, ARMCond cond, TCGReg rt,
                             TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static void tcg_out_ld16u_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_st16_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16u_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st16_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld16s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld16s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                            TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_st8_12(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static void tcg_out_ld8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_st8_r(TCGContext *s, ARMCond cond, TCGReg rt,
                          TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_ld8s_8(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static void tcg_out_ld8s_r(TCGContext *s, ARMCond cond, TCGReg rt,
                           TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static void tcg_out_movi_pool(TCGContext *s, ARMCond cond,
                              TCGReg rd, uint32_t arg)
{
    new_pool_label(s, arg, R_ARM_PC13, s->code_ptr, 0);
    tcg_out_ld32_12(s, cond, rd, TCG_REG_PC, 0);
}

static void tcg_out_movi32(TCGContext *s, ARMCond cond,
                           TCGReg rd, uint32_t arg)
{
    int imm12, diff, opc, sh1, sh2;
    uint32_t tt0, tt1, tt2;

    /* Check a single MOV/MVN before anything else. */
    imm12 = encode_imm(arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0, imm12);
        return;
    }
    imm12 = encode_imm(~arg);
    if (imm12 >= 0) {
        tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0, imm12);
        return;
    }

    /* Check for a pc-relative address.  This will usually be the TB,
       or within the TB, which is immediately before the code block. */
    diff = tcg_pcrel_diff(s, (void *)arg) - 8;
    if (diff >= 0) {
        imm12 = encode_imm(diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_ADD, rd, TCG_REG_PC, imm12);
            return;
        }
    } else {
        imm12 = encode_imm(-diff);
        if (imm12 >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_SUB, rd, TCG_REG_PC, imm12);
            return;
        }
    }

    /* Use movw + movt. */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                     | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                         | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* Look for sequences of two insns.  If we have lots of 1's, we can
       shorten the sequence by beginning with mvn and then clearing
       higher bits with eor. */
    tt0 = arg;
    opc = ARITH_MOV;
    if (ctpop32(arg) > 16) {
        tt0 = ~arg;
        opc = ARITH_MVN;
    }
    sh1 = ctz32(tt0) & ~1;
    tt1 = tt0 & ~(0xff << sh1);
    sh2 = ctz32(tt1) & ~1;
    tt2 = tt1 & ~(0xff << sh2);
    if (tt2 == 0) {
        int rot;

        rot = ((32 - sh1) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, 0, ((tt0 >> sh1) & 0xff) | rot);
        rot = ((32 - sh2) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, ARITH_EOR, rd, rd,
                        ((tt0 >> sh2) & 0xff) | rot);
        return;
    }

    /* Otherwise, drop it into the constant pool. */
    tcg_out_movi_pool(s, cond, rd, arg);
}
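
/*
 * Illustrative example of the two-insn path above: 0x12000340 has two
 * byte-sized clusters, so it is emitted as
 *     mov rd, #0x340
 *     eor rd, rd, #0x12000000
 * each operand being a rotated 8-bit immediate.
 */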

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rI" constraint.
 */
static void tcg_out_dat_rI(TCGContext *s, ARMCond cond, ARMInsn opc,
                           TCGReg dst, TCGReg lhs, TCGArg rhs, int rhs_is_const)
{
    if (rhs_is_const) {
        tcg_out_dat_imm(s, cond, opc, dst, lhs, encode_imm_nofail(rhs));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

/*
 * Emit either the reg,imm or reg,reg form of a data-processing insn.
 * rhs must satisfy the "rIK" constraint.
 */
static void tcg_out_dat_rIK(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opinv, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(~rhs);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, ARMCond cond, ARMInsn opc,
                            ARMInsn opneg, TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int imm12 = encode_imm(rhs);
        if (imm12 < 0) {
            imm12 = encode_imm_nofail(-rhs);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, imm12);
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_mul32(TCGContext *s, ARMCond cond, TCGReg rd,
                          TCGReg rn, TCGReg rm)
{
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static void tcg_out_umull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
                 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_smull32(TCGContext *s, ARMCond cond, TCGReg rd0,
                            TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
                 (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static void tcg_out_sdiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_udiv(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, TCGReg rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static void tcg_out_ext8s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxtb */
    tcg_out32(s, 0x06af0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, COND_AL, ARITH_AND, rd, rn, 0xff);
}

static void __attribute__((unused))
tcg_out_ext8u_cond(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static void tcg_out_ext16s(TCGContext *s, TCGType t, TCGReg rd, TCGReg rn)
{
    /* sxth */
    tcg_out32(s, 0x06bf0070 | (COND_AL << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u_cond(TCGContext *s, ARMCond cond,
                                TCGReg rd, TCGReg rn)
{
    /* uxth */
    tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    tcg_out_ext16u_cond(s, COND_AL, rd, rn);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rn)
{
    g_assert_not_reached();
}

static void tcg_out_bswap16(TCGContext *s, ARMCond cond,
                            TCGReg rd, TCGReg rn, int flags)
{
    if (flags & TCG_BSWAP_OS) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
        return;
    }

    /* rev16 */
    tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rd);
    }
}
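
/*
 * Illustrative: for rn = 0x1234abcd, rev16 produces 0x3412cdab; if the
 * output must be zero-extended (TCG_BSWAP_OZ) and the input was not
 * already zero-extended (no TCG_BSWAP_IZ), the uxth leaves 0x0000cdab.
 * With TCG_BSWAP_OS, revsh instead yields the sign-extended 0xffffcdab.
 */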

static void tcg_out_bswap32(TCGContext *s, ARMCond cond, TCGReg rd, TCGReg rn)
{
    /* rev */
    tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
}

static void tcg_out_deposit(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15. */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
                 | (ofs << 7) | ((ofs + len - 1) << 16));
}

static void tcg_out_extract(TCGContext *s, ARMCond cond, TCGReg rd,
                            TCGReg rn, int ofs, int len)
{
    /* ubfx */
    tcg_out32(s, 0x07e00050 | (cond << 28) | (rd << 12) | rn
                 | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_sextract(TCGContext *s, ARMCond cond, TCGReg rd,
                             TCGReg rn, int ofs, int len)
{
    /* sbfx */
    tcg_out32(s, 0x07a00050 | (cond << 28) | (rd << 12) | rn
                 | (ofs << 7) | ((len - 1) << 16));
}

static void tcg_out_ld32u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st32(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st32_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16u(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld16s(TCGContext *s, ARMCond cond,
                          TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st16(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st16_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8u(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8_12(s, cond, rd, rn, offset);
    }
}

static void tcg_out_ld8s(TCGContext *s, ARMCond cond,
                         TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
    }
}

static void tcg_out_st8(TCGContext *s, ARMCond cond,
                        TCGReg rd, TCGReg rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else {
        tcg_out_st8_12(s, cond, rd, rn, offset);
    }
}

/*
 * The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static void tcg_out_goto(TCGContext *s, ARMCond cond, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (arm_mode && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b_imm(s, cond, disp);
        return;
    }

    /* LDR is interworking from v5t. */
    tcg_out_movi_pool(s, cond, TCG_REG_PC, addri);
}

/*
 * The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
    bool arm_mode = !(addri & 1);

    if (disp - 8 < 0x02000000 && disp - 8 >= -0x02000000) {
        if (arm_mode) {
            tcg_out_bl_imm(s, COND_AL, disp);
        } else {
            tcg_out_blx_imm(s, disp);
        }
        return;
    }

    tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addri);
    tcg_out_blx_reg(s, COND_AL, TCG_REG_TMP);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *addr,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, addr);
}

static void tcg_out_goto_label(TCGContext *s, ARMCond cond, TCGLabel *l)
{
    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, l, 0);
        tcg_out_b_imm(s, cond, 0);
    }
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    if (use_armv7_instructions) {
        tcg_out32(s, INSN_DMB_ISH);
    } else {
        tcg_out32(s, INSN_DMB_MCR);
    }
}

static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    TCGReg al = args[0];
    TCGReg ah = args[1];
    TCGArg bl = args[2];
    TCGArg bh = args[3];
    TCGCond cond = args[4];
    int const_bl = const_args[2];
    int const_bh = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* We perform a conditional comparison.  If the high half is
           equal, then overwrite the flags with the comparison of the
           low half.  The resulting flags cover the whole. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, ah, bh, const_bh);
        tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
        return cond;

    case TCG_COND_LT:
    case TCG_COND_GE:
        /* We perform a double-word subtraction and examine the result.
           We do not actually need the result of the subtract, so the
           low part "subtract" is a compare.  For the high half we have
           no choice but to compute into a temporary. */
        tcg_out_dat_rI(s, COND_AL, ARITH_CMP, 0, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_SBC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return cond;

    case TCG_COND_LE:
    case TCG_COND_GT:
        /* Similar, but with swapped arguments, via reversed subtract. */
        tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR,
                       TCG_REG_TMP, al, bl, const_bl);
        tcg_out_dat_rI(s, COND_AL, ARITH_RSC | TO_CPSR,
                       TCG_REG_TMP, ah, bh, const_bh);
        return tcg_swap_cond(cond);

    default:
        g_assert_not_reached();
    }
}

/*
 * Note that TCGReg references Q-registers.
 * D-regno = 2 * Q-regno, so shift left by 1 while inserting.
 */
static uint32_t encode_vd(TCGReg rd)
{
    tcg_debug_assert(rd >= TCG_REG_Q0);
    return (extract32(rd, 3, 1) << 22) | (extract32(rd, 0, 3) << 13);
}

static uint32_t encode_vn(TCGReg rn)
{
    tcg_debug_assert(rn >= TCG_REG_Q0);
    return (extract32(rn, 3, 1) << 7) | (extract32(rn, 0, 3) << 17);
}

static uint32_t encode_vm(TCGReg rm)
{
    tcg_debug_assert(rm >= TCG_REG_Q0);
    return (extract32(rm, 3, 1) << 5) | (extract32(rm, 0, 3) << 1);
}
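
/*
 * Illustrative: for Q9 (regno bits 1001), encode_vd places bit 3 in the
 * insn's D bit (bit 22) and bits 2:0 at Vd[3:1], so the encoded field
 * names D18, the low D-register half of Q9.
 */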

static void tcg_out_vreg2(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg m)
{
    tcg_out32(s, insn | (vece << 18) | (q << 6) |
                 encode_vd(d) | encode_vm(m));
}

static void tcg_out_vreg3(TCGContext *s, ARMInsn insn, int q, int vece,
                          TCGReg d, TCGReg n, TCGReg m)
{
    tcg_out32(s, insn | (vece << 20) | (q << 6) |
                 encode_vd(d) | encode_vn(n) | encode_vm(m));
}

static void tcg_out_vmovi(TCGContext *s, TCGReg rd,
                          int q, int op, int cmode, uint8_t imm8)
{
    tcg_out32(s, INSN_VMOVI | encode_vd(rd) | (q << 6) | (op << 5)
                 | (cmode << 8) | extract32(imm8, 0, 4)
                 | (extract32(imm8, 4, 3) << 16)
                 | (extract32(imm8, 7, 1) << 24));
}

static void tcg_out_vshifti(TCGContext *s, ARMInsn insn, int q,
                            TCGReg rd, TCGReg rm, int l_imm6)
{
    tcg_out32(s, insn | (q << 6) | encode_vd(rd) | encode_vm(rm) |
                 (extract32(l_imm6, 6, 1) << 7) |
                 (extract32(l_imm6, 0, 6) << 16));
}

static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
                          TCGReg rd, TCGReg rn, int offset)
{
    if (offset != 0) {
        if (check_fit_imm(offset) || check_fit_imm(-offset)) {
            tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
                            TCG_REG_TMP, rn, offset, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                            TCG_REG_TMP, TCG_REG_TMP, rn, 0);
        }
        rn = TCG_REG_TMP;
    }
    tcg_out32(s, insn | (rn << 16) | encode_vd(rd) | 0xf);
}

#ifdef CONFIG_SOFTMMU
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[MO_SSIZE + 1] = {
    [MO_UB] = helper_ret_ldub_mmu,
    [MO_SB] = helper_ret_ldsb_mmu,
#if HOST_BIG_ENDIAN
    [MO_UW] = helper_be_lduw_mmu,
    [MO_UL] = helper_be_ldul_mmu,
    [MO_UQ] = helper_be_ldq_mmu,
    [MO_SW] = helper_be_ldsw_mmu,
    [MO_SL] = helper_be_ldul_mmu,
#else
    [MO_UW] = helper_le_lduw_mmu,
    [MO_UL] = helper_le_ldul_mmu,
    [MO_UQ] = helper_le_ldq_mmu,
    [MO_SW] = helper_le_ldsw_mmu,
    [MO_SL] = helper_le_ldul_mmu,
#endif
};

/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[MO_SIZE + 1] = {
    [MO_8]  = helper_ret_stb_mmu,
#if HOST_BIG_ENDIAN
    [MO_16] = helper_be_stw_mmu,
    [MO_32] = helper_be_stl_mmu,
    [MO_64] = helper_be_stq_mmu,
#else
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
#endif
};

/* Helper routines for marshalling helper function arguments into
 * the correct registers and stack.
 * argreg is where we want to put this argument, arg is the argument itself.
 * Return value is the updated argreg ready for the next call.
 * Note that argregs 0..3 are real registers, 4 and up are stack slots.
 *
 * We provide routines for arguments which are: immediate, 32 bit
 * value in register, 16 and 8 bit values in register (which must be zero
 * extended before use) and 64 bit value in a lo:hi register pair.
 */
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG)                \
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg)              \
{                                                                          \
    if (argreg < 4) {                                                      \
        MOV_ARG(s, COND_AL, argreg, arg);                                  \
    } else {                                                               \
        int ofs = (argreg - 4) * 4;                                        \
        EXT_ARG;                                                           \
        tcg_debug_assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);            \
        tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);         \
    }                                                                      \
    return argreg + 1;                                                     \
}

DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
    (tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u_cond,
    (tcg_out_ext8u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u_cond,
    (tcg_out_ext16u_cond(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )

static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
                                TCGReg arglo, TCGReg arghi)
{
    /* 64 bit arguments must go in even/odd register pairs
     * and in 8-aligned stack slots.
     */
    if (argreg & 1) {
        argreg++;
    }
    if (argreg >= 4 && (arglo & 1) == 0 && arghi == arglo + 1) {
        tcg_out_strd_8(s, COND_AL, arglo,
                       TCG_REG_CALL_STACK, (argreg - 4) * 4);
        return argreg + 2;
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, arglo);
        argreg = tcg_out_arg_reg32(s, argreg, arghi);
        return argreg;
    }
}
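
/*
 * Illustrative, per the EABI: marshalling a 32-bit value followed by a
 * 64-bit value puts the former in r0 and bumps argreg from 1 to 2, so
 * the latter lands in the even pair r2:r3 rather than straddling r1:r2.
 */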

#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)

/* We expect to use a 9-bit sign-magnitude negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256);

/* These offsets are built into the LDRD below. */
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4);

/* Load and compare a TLB entry, leaving the flags set.  Returns the register
   containing the addend of the tlb entry.  Clobbers R0, R1, R2, TMP. */

static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               MemOp opc, int mem_index, bool is_load)
{
    int cmp_off = (is_load ? offsetof(CPUTLBEntry, addr_read)
                   : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    unsigned s_mask = (1 << (opc & MO_SIZE)) - 1;
    unsigned a_mask = (1 << get_alignment_bits(opc)) - 1;
    TCGReg t_addr;

    /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. */
    tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off);

    /* Extract the tlb index from the address into R0. */
    tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo,
                    SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));
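    /*
     * Illustrative, assuming 4 KiB pages (TARGET_PAGE_BITS = 12) and
     * 32-byte TLB entries (CPU_TLB_ENTRY_BITS = 5): the single AND of
     * the pre-shifted mask with (addr >> 7) both extracts the index and
     * scales it by the entry size, yielding the entry's byte offset.
     */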

    /*
     * Add the tlb_table pointer, creating the CPUTLBEntry address in R1.
     * Load the tlb comparator into R2/R3 and the fast path addend into R1.
     */
    if (cmp_off == 0) {
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        } else {
            tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0);
        }
    } else {
        tcg_out_dat_reg(s, COND_AL, ARITH_ADD,
                        TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0);
        if (TARGET_LONG_BITS == 64) {
            tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        } else {
            tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off);
        }
    }

    /* Load the tlb addend. */
    tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1,
                    offsetof(CPUTLBEntry, addend));

    /*
     * Check alignment, check comparators.
     * Do this in 2-4 insns.  Use MOVW for v7, if possible,
     * to reduce the number of sequential conditional instructions.
     * Almost all guests have at least 4k pages, which means that we need
     * to clear at least 9 bits even for an 8-byte memory access, which
     * means it isn't worth checking for an immediate operand for BIC.
     *
     * For unaligned accesses, test the page of the last unit of alignment.
     * This leaves the least significant alignment bits unchanged, and of
     * course must be zero.
     */
    t_addr = addrlo;
    if (a_mask < s_mask) {
        t_addr = TCG_REG_R0;
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, t_addr,
                        addrlo, s_mask - a_mask);
    }
    if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, ~(TARGET_PAGE_MASK | a_mask));
        tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP,
                        t_addr, TCG_REG_TMP, 0);
        tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0);
    } else {
        if (a_mask) {
            tcg_debug_assert(a_mask <= 0xff);
            tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);
        }
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, t_addr,
                        SHIFT_IMM_LSR(TARGET_PAGE_BITS));
        tcg_out_dat_reg(s, (a_mask ? COND_EQ : COND_AL), ARITH_CMP,
                        0, TCG_REG_R2, TCG_REG_TMP,
                        SHIFT_IMM_LSL(TARGET_PAGE_BITS));
    }

    if (TARGET_LONG_BITS == 64) {
        tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0);
    }

    return TCG_REG_R1;
}

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code. */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo, TCGReg datahi, TCGReg addrlo,
                                TCGReg addrhi, tcg_insn_unit *raddr,
                                tcg_insn_unit *label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }
    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Use the canonical unsigned helpers and minimize icache usage. */
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, TCG_TYPE_I32, datalo, TCG_REG_R0);
        break;
    default:
        tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        break;
    case MO_UQ:
        if (datalo != TCG_REG_R1) {
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
        } else if (datahi != TCG_REG_R0) {
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
        } else {
            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
        }
        break;
    }

    tcg_out_goto(s, COND_AL, lb->raddr);
    return true;
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    TCGReg argreg, datalo, datahi;
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);

    if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    argreg = TCG_REG_R0;
    argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
    if (TARGET_LONG_BITS == 64) {
        argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
    } else {
        argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
    }

    datalo = lb->datalo_reg;
    datahi = lb->datahi_reg;
    switch (opc & MO_SIZE) {
    case MO_8:
        argreg = tcg_out_arg_reg8(s, argreg, datalo);
        break;
    case MO_16:
        argreg = tcg_out_arg_reg16(s, argreg, datalo);
        break;
    case MO_32:
    default:
        argreg = tcg_out_arg_reg32(s, argreg, datalo);
        break;
    case MO_64:
        argreg = tcg_out_arg_reg64(s, argreg, datalo, datahi);
        break;
    }

    argreg = tcg_out_arg_imm32(s, argreg, oi);
    argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);

    /* Tail-call to the helper, which will return to the fast path.  */
    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
                                   TCGReg addrhi, unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;

    /* We are expecting a_bits to max out at 7, and can easily support 8. */
    tcg_debug_assert(a_mask <= 0xff);
    /* tst addr, #mask */
    tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, a_mask);

    /* blne slow_path */
    label->label_ptr[0] = s->code_ptr;
    tcg_out_bl_imm(s, COND_NE, 0);

    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    if (!reloc_pc24(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    if (TARGET_LONG_BITS == 64) {
        /* 64-bit target address is aligned into R2:R3. */
        if (l->addrhi_reg != TCG_REG_R2) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
        } else if (l->addrlo_reg != TCG_REG_R3) {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
        }
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
    }
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_AREG0);

    /*
     * Tail call to the helper, with the return address back inline,
     * just for the clarity of the debugging traceback -- the helper
     * cannot return.  We have used BLNE to arrive here, so LR is
     * already set.
     */
    tcg_out_goto(s, COND_AL, (const void *)
                 (l->is_ld ? helper_unaligned_ld : helper_unaligned_st));
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
#endif /* SOFTMMU */

static void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                  TCGReg datalo, TCGReg datahi,
                                  TCGReg addrlo, TCGReg addend,
                                  bool scratch_addend)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SB:
        tcg_out_ld8s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UW:
        tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_SW:
        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UL:
        tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it is already aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            /*
             * Rm (the second address op) must not overlap Rt or Rt + 1.
             * Since datalo is aligned, we can simplify the test via alignment.
             * Flip the two address arguments if that works.
             */
            if ((addend & ~1) != datalo) {
                tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
                break;
            }
            if ((addrlo & ~1) != datalo) {
                tcg_out_ldrd_r(s, COND_AL, datalo, addend, addrlo);
                break;
            }
        }
        if (scratch_addend) {
            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
        } else {
            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
                            addend, addrlo, SHIFT_IMM_LSL(0));
            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

#ifndef CONFIG_SOFTMMU
static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo,
                                   TCGReg datahi, TCGReg addrlo)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_ld8_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SB:
        tcg_out_ld8s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UW:
        tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_SW:
        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UL:
        tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        break;
    case MO_UQ:
        /* We used pair allocation for datalo, so it is already aligned. */
        tcg_debug_assert((datalo & 1) == 0);
        tcg_debug_assert(datahi == datalo + 1);
        /* LDRD requires alignment; double-check that. */
        if (get_alignment_bits(opc) >= MO_64) {
            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
        } else if (datalo == addrlo) {
            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
        } else {
            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
#endif

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
{
    TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    TCGReg addend;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (is64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 1);

    /* This is a conditional BL, used only to load a pointer within this
       opcode into LR for the slow path.  We will not be using the value
       for a tail call. */
    label_ptr = s->code_ptr;
    tcg_out_bl_imm(s, COND_NE, 0);

    tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend, true);

    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#else /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
    }
    if (guest_base) {
        tcg_out_qemu_ld_index(s, opc, datalo, datahi,
                              addrlo, TCG_REG_GUEST_BASE, false);
    } else {
        tcg_out_qemu_ld_direct(s, opc, datalo, datahi, addrlo);
    }
#endif
}

static void tcg_out_qemu_st_index(TCGContext *s, ARMCond cond, MemOp opc,
                                  TCGReg datalo, TCGReg datahi,
                                  TCGReg addrlo, TCGReg addend,
                                  bool scratch_addend)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_st8_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_16:
        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_32:
        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
        break;
    case MO_64:
1876 /* We used pair allocation for datalo, so it should already be aligned. */
1877 tcg_debug_assert((datalo & 1) == 0);
1878 tcg_debug_assert(datahi == datalo + 1);
1879 /* STRD requires alignment; double-check that. */
1880 if (get_alignment_bits(opc) >= MO_64) {
1881 tcg_out_strd_r(s, cond, datalo, addrlo, addend);
1882 } else if (scratch_addend) {
1883 tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
1884 tcg_out_st32_12(s, cond, datahi, addend, 4);
1885 } else {
1886 tcg_out_dat_reg(s, cond, ARITH_ADD, TCG_REG_TMP,
1887 addend, addrlo, SHIFT_IMM_LSL(0));
1888 tcg_out_st32_12(s, cond, datalo, TCG_REG_TMP, 0);
1889 tcg_out_st32_12(s, cond, datahi, TCG_REG_TMP, 4);
1890 }
1891 break;
1892 default:
1893 g_assert_not_reached();
1894 }
1895 }
1896
1897 #ifndef CONFIG_SOFTMMU
1898 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo,
1899 TCGReg datahi, TCGReg addrlo)
1900 {
1901 /* Byte swapping is left to middle-end expansion. */
1902 tcg_debug_assert((opc & MO_BSWAP) == 0);
1903
1904 switch (opc & MO_SIZE) {
1905 case MO_8:
1906 tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
1907 break;
1908 case MO_16:
1909 tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
1910 break;
1911 case MO_32:
1912 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1913 break;
1914 case MO_64:
1915 /* We used pair allocation for datalo, so it should already be aligned. */
1916 tcg_debug_assert((datalo & 1) == 0);
1917 tcg_debug_assert(datahi == datalo + 1);
1918 /* STRD requires alignment; double-check that. */
1919 if (get_alignment_bits(opc) >= MO_64) {
1920 tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
1921 } else {
1922 tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
1923 tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
1924 }
1925 break;
1926 default:
1927 g_assert_not_reached();
1928 }
1929 }
1930 #endif
1931
1932 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
1933 {
1934 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1935 MemOpIdx oi;
1936 MemOp opc;
1937 #ifdef CONFIG_SOFTMMU
1938 int mem_index;
1939 TCGReg addend;
1940 tcg_insn_unit *label_ptr;
1941 #else
1942 unsigned a_bits;
1943 #endif
1944
1945 datalo = *args++;
1946 datahi = (is64 ? *args++ : 0);
1947 addrlo = *args++;
1948 addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
1949 oi = *args++;
1950 opc = get_memop(oi);
1951
1952 #ifdef CONFIG_SOFTMMU
1953 mem_index = get_mmuidx(oi);
1954 addend = tcg_out_tlb_read(s, addrlo, addrhi, opc, mem_index, 0);
1955
1956 tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi,
1957 addrlo, addend, true);
1958
1959 /* The conditional call must come last, as we're going to return here. */
1960 label_ptr = s->code_ptr;
1961 tcg_out_bl_imm(s, COND_NE, 0);
1962
1963 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
1964 s->code_ptr, label_ptr);
1965 #else /* !CONFIG_SOFTMMU */
1966 a_bits = get_alignment_bits(opc);
1967 if (a_bits) {
1968 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
1969 }
1970 if (guest_base) {
1971 tcg_out_qemu_st_index(s, COND_AL, opc, datalo, datahi,
1972 addrlo, TCG_REG_GUEST_BASE, false);
1973 } else {
1974 tcg_out_qemu_st_direct(s, opc, datalo, datahi, addrlo);
1975 }
1976 #endif
1977 }
1978
1979 static void tcg_out_epilogue(TCGContext *s);
1980
1981 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
1982 {
1983 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, arg);
1984 tcg_out_epilogue(s);
1985 }
1986
1987 static void tcg_out_goto_tb(TCGContext *s, int which)
1988 {
1989 uintptr_t i_addr;
1990 intptr_t i_disp;
1991
1992 /* Direct branch will be patched by tb_target_set_jmp_target. */
1993 set_jmp_insn_offset(s, which);
1994 tcg_out32(s, INSN_NOP);
1995
1996 /* When branch is out of range, fall through to indirect. */
1997 i_addr = get_jmp_target_addr(s, which);
1998 i_disp = tcg_pcrel_diff(s, (void *)i_addr) - 8;
1999 tcg_debug_assert(i_disp < 0);
2000 if (i_disp >= -0xfff) {
2001 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, i_disp);
2002 } else {
2003 /*
2004 * The TB is close, but outside the 12 bits addressable by
2005 * the load. We can extend this to 20 bits with a sub of a
2006 * shifted immediate from pc.
2007 */
2008 int h = -i_disp;
2009 int l = h & 0xfff;
2010
2011 h = encode_imm_nofail(h - l);
2012 tcg_out_dat_imm(s, COND_AL, ARITH_SUB, TCG_REG_R0, TCG_REG_PC, h);
2013 tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, -l);
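/*
 * Added worked example: for i_disp = -0x12340 we get l = 0x340 and
 * subtract 0x12000, emitting:
 *     sub r0, pc, #0x12000     @ r0 = pc - 0x12000
 *     ldr pc, [r0, #-0x340]    @ load from pc - 0x12340
 */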
2014 }
2015 set_jmp_reset_offset(s, which);
2016 }
2017
2018 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2019 uintptr_t jmp_rx, uintptr_t jmp_rw)
2020 {
2021 uintptr_t addr = tb->jmp_target_addr[n];
2022 ptrdiff_t offset = addr - (jmp_rx + 8);
2023 tcg_insn_unit insn;
2024
2025 /* Either directly branch, or fall through to indirect branch. */
2026 if (offset == sextract64(offset, 0, 26)) {
2027 /* B <addr> */
2028 insn = deposit32((COND_AL << 28) | INSN_B, 0, 24, offset >> 2);
2029 } else {
2030 insn = INSN_NOP;
2031 }
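/*
 * Added note: the direct branch reaches about +/- 32MB (a 24-bit word
 * immediate); beyond that the nop is kept and execution falls through to
 * the indirect sequence emitted by tcg_out_goto_tb.  Either way the patch
 * is a single aligned 32-bit store, so concurrent execution fetches
 * either the old or the new instruction in full.
 */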
2032
2033 qatomic_set((uint32_t *)jmp_rw, insn);
2034 flush_idcache_range(jmp_rx, jmp_rw, 4);
2035 }
2036
2037 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2038 const TCGArg args[TCG_MAX_OP_ARGS],
2039 const int const_args[TCG_MAX_OP_ARGS])
2040 {
2041 TCGArg a0, a1, a2, a3, a4, a5;
2042 int c;
2043
2044 switch (opc) {
2045 case INDEX_op_goto_ptr:
2046 tcg_out_b_reg(s, COND_AL, args[0]);
2047 break;
2048 case INDEX_op_br:
2049 tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
2050 break;
2051
2052 case INDEX_op_ld8u_i32:
2053 tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
2054 break;
2055 case INDEX_op_ld8s_i32:
2056 tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
2057 break;
2058 case INDEX_op_ld16u_i32:
2059 tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
2060 break;
2061 case INDEX_op_ld16s_i32:
2062 tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
2063 break;
2064 case INDEX_op_ld_i32:
2065 tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
2066 break;
2067 case INDEX_op_st8_i32:
2068 tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
2069 break;
2070 case INDEX_op_st16_i32:
2071 tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
2072 break;
2073 case INDEX_op_st_i32:
2074 tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
2075 break;
2076
2077 case INDEX_op_movcond_i32:
2078 /* Constraints mean that v2 is always in the same register as dest,
2079 * so we only need to do "if condition passed, move v1 to dest".
2080 */
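/* E.g. (added) movcond_i32 d, c1, c2, v1, v2, lt emits: cmp c1, c2; movlt d, v1. */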
2081 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2082 args[1], args[2], const_args[2]);
2083 tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
2084 ARITH_MVN, args[0], 0, args[3], const_args[3]);
2085 break;
2086 case INDEX_op_add_i32:
2087 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
2088 args[0], args[1], args[2], const_args[2]);
2089 break;
2090 case INDEX_op_sub_i32:
2091 if (const_args[1]) {
2092 if (const_args[2]) {
2093 tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
2094 } else {
2095 tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
2096 args[0], args[2], args[1], 1);
2097 }
2098 } else {
2099 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
2100 args[0], args[1], args[2], const_args[2]);
2101 }
2102 break;
2103 case INDEX_op_and_i32:
2104 tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
2105 args[0], args[1], args[2], const_args[2]);
2106 break;
2107 case INDEX_op_andc_i32:
2108 tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
2109 args[0], args[1], args[2], const_args[2]);
2110 break;
2111 case INDEX_op_or_i32:
2112 c = ARITH_ORR;
2113 goto gen_arith;
2114 case INDEX_op_xor_i32:
2115 c = ARITH_EOR;
2116 /* Fall through. */
2117 gen_arith:
2118 tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
2119 break;
2120 case INDEX_op_add2_i32:
2121 a0 = args[0], a1 = args[1], a2 = args[2];
2122 a3 = args[3], a4 = args[4], a5 = args[5];
2123 if (a0 == a3 || (a0 == a5 && !const_args[5])) {
2124 a0 = TCG_REG_TMP;
2125 }
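/*
 * Added note: the low halves are combined with ADDS/SUBS (TO_CPSR updates
 * the carry), which the ADC/SBC on the high halves then consumes, e.g.:
 *     adds a0, a2, a4
 *     adc  a1, a3, a5
 */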
2126 tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
2127 a0, a2, a4, const_args[4]);
2128 tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
2129 a1, a3, a5, const_args[5]);
2130 tcg_out_mov_reg(s, COND_AL, args[0], a0);
2131 break;
2132 case INDEX_op_sub2_i32:
2133 a0 = args[0], a1 = args[1], a2 = args[2];
2134 a3 = args[3], a4 = args[4], a5 = args[5];
2135 if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
2136 a0 = TCG_REG_TMP;
2137 }
2138 if (const_args[2]) {
2139 if (const_args[4]) {
2140 tcg_out_movi32(s, COND_AL, a0, a4);
2141 a4 = a0;
2142 }
2143 tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
2144 } else {
2145 tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
2146 ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
2147 }
2148 if (const_args[3]) {
2149 if (const_args[5]) {
2150 tcg_out_movi32(s, COND_AL, a1, a5);
2151 a5 = a1;
2152 }
2153 tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
2154 } else {
2155 tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
2156 a1, a3, a5, const_args[5]);
2157 }
2158 tcg_out_mov_reg(s, COND_AL, args[0], a0);
2159 break;
2160 case INDEX_op_neg_i32:
2161 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
2162 break;
2163 case INDEX_op_not_i32:
2164 tcg_out_dat_reg(s, COND_AL,
2165 ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
2166 break;
2167 case INDEX_op_mul_i32:
2168 tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
2169 break;
2170 case INDEX_op_mulu2_i32:
2171 tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2172 break;
2173 case INDEX_op_muls2_i32:
2174 tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
2175 break;
2176 /* XXX: Perhaps args[2] & 0x1f is wrong */
2177 case INDEX_op_shl_i32:
2178 c = const_args[2] ?
2179 SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
2180 goto gen_shift32;
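/*
 * Added note: in the ARM shift-immediate encoding, LSR #0 and ASR #0
 * denote shifts by 32 (and ROR #0 denotes RRX), so a zero count below
 * must be encoded as LSL #0 instead.
 */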
2181 case INDEX_op_shr_i32:
2182 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
2183 SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
2184 goto gen_shift32;
2185 case INDEX_op_sar_i32:
2186 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
2187 SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
2188 goto gen_shift32;
2189 case INDEX_op_rotr_i32:
2190 c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
2191 SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
2192 /* Fall through. */
2193 gen_shift32:
2194 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
2195 break;
2196
2197 case INDEX_op_rotl_i32:
2198 if (const_args[2]) {
2199 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2200 ((0x20 - args[2]) & 0x1f) ?
2201 SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
2202 SHIFT_IMM_LSL(0));
2203 } else {
2204 tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
2205 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
2206 SHIFT_REG_ROR(TCG_REG_TMP));
2207 }
2208 break;
2209
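/*
 * Added note: there is no count-trailing-zeros instruction, so ctz is
 * computed as clz(rbit(x)).
 */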
2210 case INDEX_op_ctz_i32:
2211 tcg_out_dat_reg(s, COND_AL, INSN_RBIT, TCG_REG_TMP, 0, args[1], 0);
2212 a1 = TCG_REG_TMP;
2213 goto do_clz;
2214
2215 case INDEX_op_clz_i32:
2216 a1 = args[1];
2217 do_clz:
2218 a0 = args[0];
2219 a2 = args[2];
2220 c = const_args[2];
2221 if (c && a2 == 32) {
2222 tcg_out_dat_reg(s, COND_AL, INSN_CLZ, a0, 0, a1, 0);
2223 break;
2224 }
2225 tcg_out_dat_imm(s, COND_AL, ARITH_CMP, 0, a1, 0);
2226 tcg_out_dat_reg(s, COND_NE, INSN_CLZ, a0, 0, a1, 0);
2227 if (c || a0 != a2) {
2228 tcg_out_dat_rIK(s, COND_EQ, ARITH_MOV, ARITH_MVN, a0, 0, a2, c);
2229 }
2230 break;
2231
2232 case INDEX_op_brcond_i32:
2233 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2234 args[0], args[1], const_args[1]);
2235 tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
2236 arg_label(args[3]));
2237 break;
2238 case INDEX_op_setcond_i32:
2239 tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
2240 args[1], args[2], const_args[2]);
2241 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
2242 ARITH_MOV, args[0], 0, 1);
2243 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
2244 ARITH_MOV, args[0], 0, 0);
2245 break;
2246
2247 case INDEX_op_brcond2_i32:
2248 c = tcg_out_cmp2(s, args, const_args);
2249 tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[5]));
2250 break;
2251 case INDEX_op_setcond2_i32:
2252 c = tcg_out_cmp2(s, args + 1, const_args + 1);
2253 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c], ARITH_MOV, args[0], 0, 1);
2254 tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
2255 ARITH_MOV, args[0], 0, 0);
2256 break;
2257
2258 case INDEX_op_qemu_ld_i32:
2259 tcg_out_qemu_ld(s, args, 0);
2260 break;
2261 case INDEX_op_qemu_ld_i64:
2262 tcg_out_qemu_ld(s, args, 1);
2263 break;
2264 case INDEX_op_qemu_st_i32:
2265 tcg_out_qemu_st(s, args, 0);
2266 break;
2267 case INDEX_op_qemu_st_i64:
2268 tcg_out_qemu_st(s, args, 1);
2269 break;
2270
2271 case INDEX_op_bswap16_i32:
2272 tcg_out_bswap16(s, COND_AL, args[0], args[1], args[2]);
2273 break;
2274 case INDEX_op_bswap32_i32:
2275 tcg_out_bswap32(s, COND_AL, args[0], args[1]);
2276 break;
2277
2278 case INDEX_op_deposit_i32:
2279 tcg_out_deposit(s, COND_AL, args[0], args[2],
2280 args[3], args[4], const_args[2]);
2281 break;
2282 case INDEX_op_extract_i32:
2283 tcg_out_extract(s, COND_AL, args[0], args[1], args[2], args[3]);
2284 break;
2285 case INDEX_op_sextract_i32:
2286 tcg_out_sextract(s, COND_AL, args[0], args[1], args[2], args[3]);
2287 break;
2288 case INDEX_op_extract2_i32:
2289 /* ??? These optimizations versus a zero operand should be generic. */
2290 /* ??? But we can't substitute 2 for 1 in the opcode stream yet. */
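/*
 * Added reminder of the semantics: extract2 d, lo, hi, pos computes
 * d = (lo >> pos) | (hi << (32 - pos)), i.e. bits pos..pos+31 of hi:lo.
 */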
2291 if (const_args[1]) {
2292 if (const_args[2]) {
2293 tcg_out_movi(s, TCG_TYPE_REG, args[0], 0);
2294 } else {
2295 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2296 args[2], SHIFT_IMM_LSL(32 - args[3]));
2297 }
2298 } else if (const_args[2]) {
2299 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0,
2300 args[1], SHIFT_IMM_LSR(args[3]));
2301 } else {
2302 /* We can do extract2 in 2 insns, vs the 3 required otherwise. */
2303 tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0,
2304 args[2], SHIFT_IMM_LSL(32 - args[3]));
2305 tcg_out_dat_reg(s, COND_AL, ARITH_ORR, args[0], TCG_REG_TMP,
2306 args[1], SHIFT_IMM_LSR(args[3]));
2307 }
2308 break;
2309
2310 case INDEX_op_div_i32:
2311 tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
2312 break;
2313 case INDEX_op_divu_i32:
2314 tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
2315 break;
2316
2317 case INDEX_op_mb:
2318 tcg_out_mb(s, args[0]);
2319 break;
2320
2321 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
2322 case INDEX_op_call: /* Always emitted via tcg_out_call. */
2323 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
2324 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
2325 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
2326 case INDEX_op_ext8u_i32:
2327 case INDEX_op_ext16s_i32:
2328 case INDEX_op_ext16u_i32:
2329 default:
2330 g_assert_not_reached();
2331 }
2332 }
2333
2334 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2335 {
2336 switch (op) {
2337 case INDEX_op_goto_ptr:
2338 return C_O0_I1(r);
2339
2340 case INDEX_op_ld8u_i32:
2341 case INDEX_op_ld8s_i32:
2342 case INDEX_op_ld16u_i32:
2343 case INDEX_op_ld16s_i32:
2344 case INDEX_op_ld_i32:
2345 case INDEX_op_neg_i32:
2346 case INDEX_op_not_i32:
2347 case INDEX_op_bswap16_i32:
2348 case INDEX_op_bswap32_i32:
2349 case INDEX_op_ext8s_i32:
2350 case INDEX_op_ext16s_i32:
2351 case INDEX_op_ext16u_i32:
2352 case INDEX_op_extract_i32:
2353 case INDEX_op_sextract_i32:
2354 return C_O1_I1(r, r);
2355
2356 case INDEX_op_st8_i32:
2357 case INDEX_op_st16_i32:
2358 case INDEX_op_st_i32:
2359 return C_O0_I2(r, r);
2360
2361 case INDEX_op_add_i32:
2362 case INDEX_op_sub_i32:
2363 case INDEX_op_setcond_i32:
2364 return C_O1_I2(r, r, rIN);
2365
2366 case INDEX_op_and_i32:
2367 case INDEX_op_andc_i32:
2368 case INDEX_op_clz_i32:
2369 case INDEX_op_ctz_i32:
2370 return C_O1_I2(r, r, rIK);
2371
2372 case INDEX_op_mul_i32:
2373 case INDEX_op_div_i32:
2374 case INDEX_op_divu_i32:
2375 return C_O1_I2(r, r, r);
2376
2377 case INDEX_op_mulu2_i32:
2378 case INDEX_op_muls2_i32:
2379 return C_O2_I2(r, r, r, r);
2380
2381 case INDEX_op_or_i32:
2382 case INDEX_op_xor_i32:
2383 return C_O1_I2(r, r, rI);
2384
2385 case INDEX_op_shl_i32:
2386 case INDEX_op_shr_i32:
2387 case INDEX_op_sar_i32:
2388 case INDEX_op_rotl_i32:
2389 case INDEX_op_rotr_i32:
2390 return C_O1_I2(r, r, ri);
2391
2392 case INDEX_op_brcond_i32:
2393 return C_O0_I2(r, rIN);
2394 case INDEX_op_deposit_i32:
2395 return C_O1_I2(r, 0, rZ);
2396 case INDEX_op_extract2_i32:
2397 return C_O1_I2(r, rZ, rZ);
2398 case INDEX_op_movcond_i32:
2399 return C_O1_I4(r, r, rIN, rIK, 0);
2400 case INDEX_op_add2_i32:
2401 return C_O2_I4(r, r, r, r, rIN, rIK);
2402 case INDEX_op_sub2_i32:
2403 return C_O2_I4(r, r, rI, rI, rIN, rIK);
2404 case INDEX_op_brcond2_i32:
2405 return C_O0_I4(r, r, rI, rI);
2406 case INDEX_op_setcond2_i32:
2407 return C_O1_I4(r, r, r, rI, rI);
2408
2409 case INDEX_op_qemu_ld_i32:
2410 return TARGET_LONG_BITS == 32 ? C_O1_I1(r, l) : C_O1_I2(r, l, l);
2411 case INDEX_op_qemu_ld_i64:
2412 return TARGET_LONG_BITS == 32 ? C_O2_I1(e, p, l) : C_O2_I2(e, p, l, l);
2413 case INDEX_op_qemu_st_i32:
2414 return TARGET_LONG_BITS == 32 ? C_O0_I2(s, s) : C_O0_I3(s, s, s);
2415 case INDEX_op_qemu_st_i64:
2416 return TARGET_LONG_BITS == 32 ? C_O0_I3(S, p, s) : C_O0_I4(S, p, s, s);
2417
2418 case INDEX_op_st_vec:
2419 return C_O0_I2(w, r);
2420 case INDEX_op_ld_vec:
2421 case INDEX_op_dupm_vec:
2422 return C_O1_I1(w, r);
2423 case INDEX_op_dup_vec:
2424 return C_O1_I1(w, wr);
2425 case INDEX_op_abs_vec:
2426 case INDEX_op_neg_vec:
2427 case INDEX_op_not_vec:
2428 case INDEX_op_shli_vec:
2429 case INDEX_op_shri_vec:
2430 case INDEX_op_sari_vec:
2431 return C_O1_I1(w, w);
2432 case INDEX_op_dup2_vec:
2433 case INDEX_op_add_vec:
2434 case INDEX_op_mul_vec:
2435 case INDEX_op_smax_vec:
2436 case INDEX_op_smin_vec:
2437 case INDEX_op_ssadd_vec:
2438 case INDEX_op_sssub_vec:
2439 case INDEX_op_sub_vec:
2440 case INDEX_op_umax_vec:
2441 case INDEX_op_umin_vec:
2442 case INDEX_op_usadd_vec:
2443 case INDEX_op_ussub_vec:
2444 case INDEX_op_xor_vec:
2445 case INDEX_op_arm_sshl_vec:
2446 case INDEX_op_arm_ushl_vec:
2447 return C_O1_I2(w, w, w);
2448 case INDEX_op_arm_sli_vec:
2449 return C_O1_I2(w, 0, w);
2450 case INDEX_op_or_vec:
2451 case INDEX_op_andc_vec:
2452 return C_O1_I2(w, w, wO);
2453 case INDEX_op_and_vec:
2454 case INDEX_op_orc_vec:
2455 return C_O1_I2(w, w, wV);
2456 case INDEX_op_cmp_vec:
2457 return C_O1_I2(w, w, wZ);
2458 case INDEX_op_bitsel_vec:
2459 return C_O1_I3(w, w, w, w);
2460 default:
2461 g_assert_not_reached();
2462 }
2463 }
2464
2465 static void tcg_target_init(TCGContext *s)
2466 {
2467 /*
2468 * Only probe for the platform and capabilities if we haven't already
2469 * determined maximum values at compile time.
2470 */
2471 #if !defined(use_idiv_instructions) || !defined(use_neon_instructions)
2472 {
2473 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2474 #ifndef use_idiv_instructions
2475 use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
2476 #endif
2477 #ifndef use_neon_instructions
2478 use_neon_instructions = (hwcap & HWCAP_ARM_NEON) != 0;
2479 #endif
2480 }
2481 #endif
2482
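/*
 * Added note: on Linux AT_PLATFORM is a string such as "v7l" or "v6l",
 * so the architecture revision is the digit at pl[1].
 */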
2483 if (__ARM_ARCH < 7) {
2484 const char *pl = (const char *)qemu_getauxval(AT_PLATFORM);
2485 if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
2486 arm_arch = pl[1] - '0';
2487 }
2488
2489 if (arm_arch < 6) {
2490 error_report("TCG: ARMv%d is unsupported; exiting", arm_arch);
2491 exit(EXIT_FAILURE);
2492 }
2493 }
2494
2495 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2496
2497 tcg_target_call_clobber_regs = 0;
2498 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
2499 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
2500 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
2501 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
2502 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
2503 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);
2504
2505 if (use_neon_instructions) {
2506 tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
2507 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2508
2509 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q0);
2510 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q1);
2511 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q2);
2512 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q3);
2513 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q8);
2514 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q9);
2515 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q10);
2516 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q11);
2517 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q12);
2518 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q13);
2519 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q14);
2520 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_Q15);
2521 }
2522
2523 s->reserved_regs = 0;
2524 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
2525 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
2526 tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
2527 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP);
2528 }
2529
2530 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
2531 TCGReg arg1, intptr_t arg2)
2532 {
2533 switch (type) {
2534 case TCG_TYPE_I32:
2535 tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
2536 return;
2537 case TCG_TYPE_V64:
2538 /* regs 1; size 8; align 8 */
2539 tcg_out_vldst(s, INSN_VLD1 | 0x7d0, arg, arg1, arg2);
2540 return;
2541 case TCG_TYPE_V128:
2542 /*
2543 * We have only 8-byte alignment for the stack per the ABI.
2544 * Rather than dynamically re-align the stack, it's easier
2545 * to simply not request alignment beyond that. So:
2546 * regs 2; size 8; align 8
2547 */
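/*
 * Added decode of the 0xad0 constant (assuming the usual A32 VLD1
 * layout): type[11:8] = 0b1010 (two registers), size[7:6] = 0b11,
 * align[5:4] = 0b01; 0x7d0 above differs only in type = 0b0111.
 */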
2548 tcg_out_vldst(s, INSN_VLD1 | 0xad0, arg, arg1, arg2);
2549 return;
2550 default:
2551 g_assert_not_reached();
2552 }
2553 }
2554
2555 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
2556 TCGReg arg1, intptr_t arg2)
2557 {
2558 switch (type) {
2559 case TCG_TYPE_I32:
2560 tcg_out_st32(s, COND_AL, arg, arg1, arg2);
2561 return;
2562 case TCG_TYPE_V64:
2563 /* regs 1; size 8; align 8 */
2564 tcg_out_vldst(s, INSN_VST1 | 0x7d0, arg, arg1, arg2);
2565 return;
2566 case TCG_TYPE_V128:
2567 /* See tcg_out_ld re alignment: regs 2; size 8; align 8 */
2568 tcg_out_vldst(s, INSN_VST1 | 0xad0, arg, arg1, arg2);
2569 return;
2570 default:
2571 g_assert_not_reached();
2572 }
2573 }
2574
2575 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
2576 TCGReg base, intptr_t ofs)
2577 {
2578 return false;
2579 }
2580
2581 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
2582 {
2583 if (ret == arg) {
2584 return true;
2585 }
2586 switch (type) {
2587 case TCG_TYPE_I32:
2588 if (ret < TCG_REG_Q0 && arg < TCG_REG_Q0) {
2589 tcg_out_mov_reg(s, COND_AL, ret, arg);
2590 return true;
2591 }
2592 return false;
2593
2594 case TCG_TYPE_V64:
2595 case TCG_TYPE_V128:
2596 /* "VMOV D,N" is an alias for "VORR D,N,N". */
2597 tcg_out_vreg3(s, INSN_VORR, type - TCG_TYPE_V64, 0, ret, arg, arg);
2598 return true;
2599
2600 default:
2601 g_assert_not_reached();
2602 }
2603 }
2604
2605 static void tcg_out_movi(TCGContext *s, TCGType type,
2606 TCGReg ret, tcg_target_long arg)
2607 {
2608 tcg_debug_assert(type == TCG_TYPE_I32);
2609 tcg_debug_assert(ret < TCG_REG_Q0);
2610 tcg_out_movi32(s, COND_AL, ret, arg);
2611 }
2612
2613 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
2614 tcg_target_long imm)
2615 {
2616 int enc, opc = ARITH_ADD;
2617
2618 /* All of the easiest immediates to encode are positive. */
2619 if (imm < 0) {
2620 imm = -imm;
2621 opc = ARITH_SUB;
2622 }
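/*
 * Added note: an A32 data-processing immediate is an 8-bit value rotated
 * right by an even amount; encode_imm returns that encoding, or a
 * negative value when imm cannot be represented.
 */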
2623 enc = encode_imm(imm);
2624 if (enc >= 0) {
2625 tcg_out_dat_imm(s, COND_AL, opc, rd, rs, enc);
2626 } else {
2627 tcg_out_movi32(s, COND_AL, TCG_REG_TMP, imm);
2628 tcg_out_dat_reg(s, COND_AL, opc, rd, rs,
2629 TCG_REG_TMP, SHIFT_IMM_LSL(0));
2630 }
2631 }
2632
2633 /* Type is always V128, with I64 elements. */
2634 static void tcg_out_dup2_vec(TCGContext *s, TCGReg rd, TCGReg rl, TCGReg rh)
2635 {
2636 /* Move high element into place first. */
2637 /* VMOV Dd+1, Ds */
2638 tcg_out_vreg3(s, INSN_VORR | (1 << 12), 0, 0, rd, rh, rh);
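/* Added note: ORing in (1 << 12) bumps the even Vd field, selecting Dd+1. */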
2639 /* Move low element into place; tcg_out_mov will check for nop. */
2640 tcg_out_mov(s, TCG_TYPE_V64, rd, rl);
2641 }
2642
2643 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
2644 TCGReg rd, TCGReg rs)
2645 {
2646 int q = type - TCG_TYPE_V64;
2647
2648 if (vece == MO_64) {
2649 if (type == TCG_TYPE_V128) {
2650 tcg_out_dup2_vec(s, rd, rs, rs);
2651 } else {
2652 tcg_out_mov(s, TCG_TYPE_V64, rd, rs);
2653 }
2654 } else if (rs < TCG_REG_Q0) {
2655 int b = (vece == MO_8);
2656 int e = (vece == MO_16);
2657 tcg_out32(s, INSN_VDUP_G | (b << 22) | (q << 21) | (e << 5) |
2658 encode_vn(rd) | (rs << 12));
2659 } else {
2660 int imm4 = 1 << vece;
2661 tcg_out32(s, INSN_VDUP_S | (imm4 << 16) | (q << 6) |
2662 encode_vd(rd) | encode_vm(rs));
2663 }
2664 return true;
2665 }
2666
2667 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
2668 TCGReg rd, TCGReg base, intptr_t offset)
2669 {
2670 if (vece == MO_64) {
2671 tcg_out_ld(s, TCG_TYPE_V64, rd, base, offset);
2672 if (type == TCG_TYPE_V128) {
2673 tcg_out_dup2_vec(s, rd, rd, rd);
2674 }
2675 } else {
2676 int q = type - TCG_TYPE_V64;
2677 tcg_out_vldst(s, INSN_VLD1R | (vece << 6) | (q << 5),
2678 rd, base, offset);
2679 }
2680 return true;
2681 }
2682
2683 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
2684 TCGReg rd, int64_t v64)
2685 {
2686 int q = type - TCG_TYPE_V64;
2687 int cmode, imm8, i;
2688
2689 /* Test all bytes equal first. */
2690 if (vece == MO_8) {
2691 tcg_out_vmovi(s, rd, q, 0, 0xe, v64);
2692 return;
2693 }
2694
2695 /*
2696 * Test all bytes 0x00 or 0xff second. This can match cases that
2697 * might otherwise take 2 or 3 insns for MO_16 or MO_32 below.
2698 */
2699 for (i = imm8 = 0; i < 8; i++) {
2700 uint8_t byte = v64 >> (i * 8);
2701 if (byte == 0xff) {
2702 imm8 |= 1 << i;
2703 } else if (byte != 0) {
2704 goto fail_bytes;
2705 }
2706 }
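/*
 * Added example: v64 = 0x00ff00ff00ff00ff yields imm8 = 0x55; with
 * op = 1, cmode = 0xe, each imm8 bit expands to a full result byte.
 */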
2707 tcg_out_vmovi(s, rd, q, 1, 0xe, imm8);
2708 return;
2709 fail_bytes:
2710
2711 /*
2712 * Tests for various replications. For each element width, if we
2713 * cannot find an expansion there's no point checking a larger
2714 * width because we already know by replication it cannot match.
2715 */
2716 if (vece == MO_16) {
2717 uint16_t v16 = v64;
2718
2719 if (is_shimm16(v16, &cmode, &imm8)) {
2720 tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2721 return;
2722 }
2723 if (is_shimm16(~v16, &cmode, &imm8)) {
2724 tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2725 return;
2726 }
2727
2728 /*
2729 * Otherwise, all remaining constants can be loaded in two insns:
2730 * rd = v16 & 0xff, rd |= v16 & 0xff00.
2731 */
2732 tcg_out_vmovi(s, rd, q, 0, 0x8, v16 & 0xff);
2733 tcg_out_vmovi(s, rd, q, 0, 0xb, v16 >> 8); /* VORRI */
2734 return;
2735 }
2736
2737 if (vece == MO_32) {
2738 uint32_t v32 = v64;
2739
2740 if (is_shimm32(v32, &cmode, &imm8) ||
2741 is_soimm32(v32, &cmode, &imm8)) {
2742 tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2743 return;
2744 }
2745 if (is_shimm32(~v32, &cmode, &imm8) ||
2746 is_soimm32(~v32, &cmode, &imm8)) {
2747 tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2748 return;
2749 }
2750
2751 /*
2752 * Restrict the set of constants to those we can load with
2753 * two instructions. Others we load from the pool.
2754 */
2755 i = is_shimm32_pair(v32, &cmode, &imm8);
2756 if (i) {
2757 tcg_out_vmovi(s, rd, q, 0, cmode, imm8);
2758 tcg_out_vmovi(s, rd, q, 0, i | 1, extract32(v32, i * 4, 8));
2759 return;
2760 }
2761 i = is_shimm32_pair(~v32, &cmode, &imm8);
2762 if (i) {
2763 tcg_out_vmovi(s, rd, q, 1, cmode, imm8);
2764 tcg_out_vmovi(s, rd, q, 1, i | 1, extract32(~v32, i * 4, 8));
2765 return;
2766 }
2767 }
2768
2769 /*
2770 * As a last resort, load from the constant pool.
2771 */
2772 if (!q || vece == MO_64) {
2773 new_pool_l2(s, R_ARM_PC11, s->code_ptr, 0, v64, v64 >> 32);
2774 /* VLDR Dd, [pc + offset] */
2775 tcg_out32(s, INSN_VLDR_D | encode_vd(rd) | (0xf << 16));
2776 if (q) {
2777 tcg_out_dup2_vec(s, rd, rd, rd);
2778 }
2779 } else {
2780 new_pool_label(s, (uint32_t)v64, R_ARM_PC8, s->code_ptr, 0);
2781 /* add tmp, pc, offset */
2782 tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, TCG_REG_PC, 0);
2783 tcg_out_dupm_vec(s, type, MO_32, rd, TCG_REG_TMP, 0);
2784 }
2785 }
2786
2787 static const ARMInsn vec_cmp_insn[16] = {
2788 [TCG_COND_EQ] = INSN_VCEQ,
2789 [TCG_COND_GT] = INSN_VCGT,
2790 [TCG_COND_GE] = INSN_VCGE,
2791 [TCG_COND_GTU] = INSN_VCGT_U,
2792 [TCG_COND_GEU] = INSN_VCGE_U,
2793 };
2794
2795 static const ARMInsn vec_cmp0_insn[16] = {
2796 [TCG_COND_EQ] = INSN_VCEQ0,
2797 [TCG_COND_GT] = INSN_VCGT0,
2798 [TCG_COND_GE] = INSN_VCGE0,
2799 [TCG_COND_LT] = INSN_VCLT0,
2800 [TCG_COND_LE] = INSN_VCLE0,
2801 };
2802
2803 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
2804 unsigned vecl, unsigned vece,
2805 const TCGArg args[TCG_MAX_OP_ARGS],
2806 const int const_args[TCG_MAX_OP_ARGS])
2807 {
2808 TCGType type = vecl + TCG_TYPE_V64;
2809 unsigned q = vecl;
2810 TCGArg a0, a1, a2, a3;
2811 int cmode, imm8;
2812
2813 a0 = args[0];
2814 a1 = args[1];
2815 a2 = args[2];
2816
2817 switch (opc) {
2818 case INDEX_op_ld_vec:
2819 tcg_out_ld(s, type, a0, a1, a2);
2820 return;
2821 case INDEX_op_st_vec:
2822 tcg_out_st(s, type, a0, a1, a2);
2823 return;
2824 case INDEX_op_dupm_vec:
2825 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
2826 return;
2827 case INDEX_op_dup2_vec:
2828 tcg_out_dup2_vec(s, a0, a1, a2);
2829 return;
2830 case INDEX_op_abs_vec:
2831 tcg_out_vreg2(s, INSN_VABS, q, vece, a0, a1);
2832 return;
2833 case INDEX_op_neg_vec:
2834 tcg_out_vreg2(s, INSN_VNEG, q, vece, a0, a1);
2835 return;
2836 case INDEX_op_not_vec:
2837 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a1);
2838 return;
2839 case INDEX_op_add_vec:
2840 tcg_out_vreg3(s, INSN_VADD, q, vece, a0, a1, a2);
2841 return;
2842 case INDEX_op_mul_vec:
2843 tcg_out_vreg3(s, INSN_VMUL, q, vece, a0, a1, a2);
2844 return;
2845 case INDEX_op_smax_vec:
2846 tcg_out_vreg3(s, INSN_VMAX, q, vece, a0, a1, a2);
2847 return;
2848 case INDEX_op_smin_vec:
2849 tcg_out_vreg3(s, INSN_VMIN, q, vece, a0, a1, a2);
2850 return;
2851 case INDEX_op_sub_vec:
2852 tcg_out_vreg3(s, INSN_VSUB, q, vece, a0, a1, a2);
2853 return;
2854 case INDEX_op_ssadd_vec:
2855 tcg_out_vreg3(s, INSN_VQADD, q, vece, a0, a1, a2);
2856 return;
2857 case INDEX_op_sssub_vec:
2858 tcg_out_vreg3(s, INSN_VQSUB, q, vece, a0, a1, a2);
2859 return;
2860 case INDEX_op_umax_vec:
2861 tcg_out_vreg3(s, INSN_VMAX_U, q, vece, a0, a1, a2);
2862 return;
2863 case INDEX_op_umin_vec:
2864 tcg_out_vreg3(s, INSN_VMIN_U, q, vece, a0, a1, a2);
2865 return;
2866 case INDEX_op_usadd_vec:
2867 tcg_out_vreg3(s, INSN_VQADD_U, q, vece, a0, a1, a2);
2868 return;
2869 case INDEX_op_ussub_vec:
2870 tcg_out_vreg3(s, INSN_VQSUB_U, q, vece, a0, a1, a2);
2871 return;
2872 case INDEX_op_xor_vec:
2873 tcg_out_vreg3(s, INSN_VEOR, q, 0, a0, a1, a2);
2874 return;
2875 case INDEX_op_arm_sshl_vec:
2876 /*
2877 * Note that Vm is the data and Vn is the shift count,
2878 * therefore the arguments appear reversed.
2879 */
2880 tcg_out_vreg3(s, INSN_VSHL_S, q, vece, a0, a2, a1);
2881 return;
2882 case INDEX_op_arm_ushl_vec:
2883 /* See above. */
2884 tcg_out_vreg3(s, INSN_VSHL_U, q, vece, a0, a2, a1);
2885 return;
2886 case INDEX_op_shli_vec:
2887 tcg_out_vshifti(s, INSN_VSHLI, q, a0, a1, a2 + (8 << vece));
2888 return;
2889 case INDEX_op_shri_vec:
2890 tcg_out_vshifti(s, INSN_VSHRI, q, a0, a1, (16 << vece) - a2);
2891 return;
2892 case INDEX_op_sari_vec:
2893 tcg_out_vshifti(s, INSN_VSARI, q, a0, a1, (16 << vece) - a2);
2894 return;
2895 case INDEX_op_arm_sli_vec:
2896 tcg_out_vshifti(s, INSN_VSLI, q, a0, a2, args[3] + (8 << vece));
2897 return;
2898
2899 case INDEX_op_andc_vec:
2900 if (!const_args[2]) {
2901 tcg_out_vreg3(s, INSN_VBIC, q, 0, a0, a1, a2);
2902 return;
2903 }
2904 a2 = ~a2;
2905 /* fall through */
2906 case INDEX_op_and_vec:
2907 if (const_args[2]) {
2908 is_shimm1632(~a2, &cmode, &imm8);
2909 if (a0 == a1) {
2910 tcg_out_vmovi(s, a0, q, 1, cmode | 1, imm8); /* VBICI */
2911 return;
2912 }
2913 tcg_out_vmovi(s, a0, q, 1, cmode, imm8); /* VMVNI */
2914 a2 = a0;
2915 }
2916 tcg_out_vreg3(s, INSN_VAND, q, 0, a0, a1, a2);
2917 return;
2918
2919 case INDEX_op_orc_vec:
2920 if (!const_args[2]) {
2921 tcg_out_vreg3(s, INSN_VORN, q, 0, a0, a1, a2);
2922 return;
2923 }
2924 a2 = ~a2;
2925 /* fall through */
2926 case INDEX_op_or_vec:
2927 if (const_args[2]) {
2928 is_shimm1632(a2, &cmode, &imm8);
2929 if (a0 == a1) {
2930 tcg_out_vmovi(s, a0, q, 0, cmode | 1, imm8); /* VORRI */
2931 return;
2932 }
2933 tcg_out_vmovi(s, a0, q, 0, cmode, imm8); /* VMOVI */
2934 a2 = a0;
2935 }
2936 tcg_out_vreg3(s, INSN_VORR, q, 0, a0, a1, a2);
2937 return;
2938
2939 case INDEX_op_cmp_vec:
2940 {
2941 TCGCond cond = args[3];
2942
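/*
 * Added note: NE against zero uses VTST Vd, Vn, Vn, which sets a lane
 * to all ones iff (Vn & Vn) != 0; the general NE is VCEQ plus VMVN.
 */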
2943 if (cond == TCG_COND_NE) {
2944 if (const_args[2]) {
2945 tcg_out_vreg3(s, INSN_VTST, q, vece, a0, a1, a1);
2946 } else {
2947 tcg_out_vreg3(s, INSN_VCEQ, q, vece, a0, a1, a2);
2948 tcg_out_vreg2(s, INSN_VMVN, q, 0, a0, a0);
2949 }
2950 } else {
2951 ARMInsn insn;
2952
2953 if (const_args[2]) {
2954 insn = vec_cmp0_insn[cond];
2955 if (insn) {
2956 tcg_out_vreg2(s, insn, q, vece, a0, a1);
2957 return;
2958 }
2959 tcg_out_dupi_vec(s, type, MO_8, TCG_VEC_TMP, 0);
2960 a2 = TCG_VEC_TMP;
2961 }
2962 insn = vec_cmp_insn[cond];
2963 if (insn == 0) {
2964 TCGArg t;
2965 t = a1, a1 = a2, a2 = t;
2966 cond = tcg_swap_cond(cond);
2967 insn = vec_cmp_insn[cond];
2968 tcg_debug_assert(insn != 0);
2969 }
2970 tcg_out_vreg3(s, insn, q, vece, a0, a1, a2);
2971 }
2972 }
2973 return;
2974
2975 case INDEX_op_bitsel_vec:
2976 a3 = args[3];
2977 if (a0 == a3) {
2978 tcg_out_vreg3(s, INSN_VBIT, q, 0, a0, a2, a1);
2979 } else if (a0 == a2) {
2980 tcg_out_vreg3(s, INSN_VBIF, q, 0, a0, a3, a1);
2981 } else {
2982 tcg_out_mov(s, type, a0, a1);
2983 tcg_out_vreg3(s, INSN_VBSL, q, 0, a0, a2, a3);
2984 }
2985 return;
2986
2987 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
2988 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
2989 default:
2990 g_assert_not_reached();
2991 }
2992 }
2993
2994 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2995 {
2996 switch (opc) {
2997 case INDEX_op_add_vec:
2998 case INDEX_op_sub_vec:
2999 case INDEX_op_and_vec:
3000 case INDEX_op_andc_vec:
3001 case INDEX_op_or_vec:
3002 case INDEX_op_orc_vec:
3003 case INDEX_op_xor_vec:
3004 case INDEX_op_not_vec:
3005 case INDEX_op_shli_vec:
3006 case INDEX_op_shri_vec:
3007 case INDEX_op_sari_vec:
3008 case INDEX_op_ssadd_vec:
3009 case INDEX_op_sssub_vec:
3010 case INDEX_op_usadd_vec:
3011 case INDEX_op_ussub_vec:
3012 case INDEX_op_bitsel_vec:
3013 return 1;
3014 case INDEX_op_abs_vec:
3015 case INDEX_op_cmp_vec:
3016 case INDEX_op_mul_vec:
3017 case INDEX_op_neg_vec:
3018 case INDEX_op_smax_vec:
3019 case INDEX_op_smin_vec:
3020 case INDEX_op_umax_vec:
3021 case INDEX_op_umin_vec:
3022 return vece < MO_64;
3023 case INDEX_op_shlv_vec:
3024 case INDEX_op_shrv_vec:
3025 case INDEX_op_sarv_vec:
3026 case INDEX_op_rotli_vec:
3027 case INDEX_op_rotlv_vec:
3028 case INDEX_op_rotrv_vec:
3029 return -1;
3030 default:
3031 return 0;
3032 }
3033 }
3034
3035 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3036 TCGArg a0, ...)
3037 {
3038 va_list va;
3039 TCGv_vec v0, v1, v2, t1, t2, c1;
3040 TCGArg a2;
3041
3042 va_start(va, a0);
3043 v0 = temp_tcgv_vec(arg_temp(a0));
3044 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3045 a2 = va_arg(va, TCGArg);
3046 va_end(va);
3047
3048 switch (opc) {
3049 case INDEX_op_shlv_vec:
3050 /*
3051 * Merely propagate shlv_vec to arm_ushl_vec.
3052 * In this way we don't set TCG_TARGET_HAS_shv_vec
3053 * because everything is done via expansion.
3054 */
3055 v2 = temp_tcgv_vec(arg_temp(a2));
3056 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3057 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3058 break;
3059
3060 case INDEX_op_shrv_vec:
3061 case INDEX_op_sarv_vec:
3062 /* Right shifts are negative left shifts for NEON. */
3063 v2 = temp_tcgv_vec(arg_temp(a2));
3064 t1 = tcg_temp_new_vec(type);
3065 tcg_gen_neg_vec(vece, t1, v2);
3066 if (opc == INDEX_op_shrv_vec) {
3067 opc = INDEX_op_arm_ushl_vec;
3068 } else {
3069 opc = INDEX_op_arm_sshl_vec;
3070 }
3071 vec_gen_3(opc, type, vece, tcgv_vec_arg(v0),
3072 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3073 tcg_temp_free_vec(t1);
3074 break;
3075
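/*
 * Added note: an immediate rotate left by c is a right shift by w - c
 * followed by VSLI (shift left and insert) by c, merging the two halves.
 */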
3076 case INDEX_op_rotli_vec:
3077 t1 = tcg_temp_new_vec(type);
3078 tcg_gen_shri_vec(vece, t1, v1, -a2 & ((8 << vece) - 1));
3079 vec_gen_4(INDEX_op_arm_sli_vec, type, vece,
3080 tcgv_vec_arg(v0), tcgv_vec_arg(t1), tcgv_vec_arg(v1), a2);
3081 tcg_temp_free_vec(t1);
3082 break;
3083
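/*
 * Added note: a variable rotate left by c is (x << c) | (x >> (w - c));
 * the right shift is an arm_ushl_vec by the negative count c - w, which
 * NEON defines as a right shift.
 */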
3084 case INDEX_op_rotlv_vec:
3085 v2 = temp_tcgv_vec(arg_temp(a2));
3086 t1 = tcg_temp_new_vec(type);
3087 c1 = tcg_constant_vec(type, vece, 8 << vece);
3088 tcg_gen_sub_vec(vece, t1, v2, c1);
3089 /* Right shifts are negative left shifts for NEON. */
3090 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3091 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3092 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(v0),
3093 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3094 tcg_gen_or_vec(vece, v0, v0, t1);
3095 tcg_temp_free_vec(t1);
3096 break;
3097
3098 case INDEX_op_rotrv_vec:
3099 v2 = temp_tcgv_vec(arg_temp(a2));
3100 t1 = tcg_temp_new_vec(type);
3101 t2 = tcg_temp_new_vec(type);
3102 c1 = tcg_constant_vec(type, vece, 8 << vece);
3103 tcg_gen_neg_vec(vece, t1, v2);
3104 tcg_gen_sub_vec(vece, t2, c1, v2);
3105 /* Right shifts are negative left shifts for NEON. */
3106 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t1),
3107 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3108 vec_gen_3(INDEX_op_arm_ushl_vec, type, vece, tcgv_vec_arg(t2),
3109 tcgv_vec_arg(v1), tcgv_vec_arg(t2));
3110 tcg_gen_or_vec(vece, v0, t1, t2);
3111 tcg_temp_free_vec(t1);
3112 tcg_temp_free_vec(t2);
3113 break;
3114
3115 default:
3116 g_assert_not_reached();
3117 }
3118 }
3119
3120 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
3121 {
3122 int i;
3123 for (i = 0; i < count; ++i) {
3124 p[i] = INSN_NOP;
3125 }
3126 }
3127
3128 /* Compute frame size via macros, to share between tcg_target_qemu_prologue
3129 and tcg_register_jit. */
3130
3131 #define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
3132
3133 #define FRAME_SIZE \
3134 ((PUSH_SIZE \
3135 + TCG_STATIC_CALL_ARGS_SIZE \
3136 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
3137 + TCG_TARGET_STACK_ALIGN - 1) \
3138 & -TCG_TARGET_STACK_ALIGN)
3139
3140 #define STACK_ADDEND (FRAME_SIZE - PUSH_SIZE)
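/*
 * Added example: r4-r11 plus lr makes PUSH_SIZE 9 * 4 = 36 bytes;
 * FRAME_SIZE then rounds the push, call-args and temp-buffer space up
 * to TCG_TARGET_STACK_ALIGN, keeping sp 8-byte aligned per the AAPCS.
 */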
3141
3142 static void tcg_target_qemu_prologue(TCGContext *s)
3143 {
3144 /* Calling convention requires us to save r4-r11 and lr. */
3145 /* stmdb sp!, { r4 - r11, lr } */
3146 tcg_out_ldstm(s, COND_AL, INSN_STMDB, TCG_REG_CALL_STACK,
3147 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3148 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3149 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_R14));
3150
3151 /* Reserve callee argument and tcg temp space. */
3152 tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
3153 TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3154 tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
3155 CPU_TEMP_BUF_NLONGS * sizeof(long));
3156
3157 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
3158
3159 #ifndef CONFIG_SOFTMMU
3160 if (guest_base) {
3161 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
3162 tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
3163 }
3164 #endif
3165
3166 tcg_out_b_reg(s, COND_AL, tcg_target_call_iarg_regs[1]);
3167
3168 /*
3169 * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
3170 * and fall through to the rest of the epilogue.
3171 */
3172 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
3173 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
3174 tcg_out_epilogue(s);
3175 }
3176
3177 static void tcg_out_epilogue(TCGContext *s)
3178 {
3179 /* Release local stack frame. */
3180 tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
3181 TCG_REG_CALL_STACK, STACK_ADDEND, 1);
3182
3183 /* ldmia sp!, { r4 - r11, pc } */
3184 tcg_out_ldstm(s, COND_AL, INSN_LDMIA, TCG_REG_CALL_STACK,
3185 (1 << TCG_REG_R4) | (1 << TCG_REG_R5) | (1 << TCG_REG_R6) |
3186 (1 << TCG_REG_R7) | (1 << TCG_REG_R8) | (1 << TCG_REG_R9) |
3187 (1 << TCG_REG_R10) | (1 << TCG_REG_R11) | (1 << TCG_REG_PC));
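/* Added note: loading the saved lr value into pc performs the return. */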
3188 }
3189
3190 typedef struct {
3191 DebugFrameHeader h;
3192 uint8_t fde_def_cfa[4];
3193 uint8_t fde_reg_ofs[18];
3194 } DebugFrame;
3195
3196 #define ELF_HOST_MACHINE EM_ARM
3197
3198 /* We expect a 2-byte uleb128-encoded value. */
3199 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
3200
3201 static const DebugFrame debug_frame = {
3202 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3203 .h.cie.id = -1,
3204 .h.cie.version = 1,
3205 .h.cie.code_align = 1,
3206 .h.cie.data_align = 0x7c, /* sleb128 -4 */
3207 .h.cie.return_column = 14,
3208
3209 /* Total FDE size does not include the "len" member. */
3210 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
3211
3212 .fde_def_cfa = {
3213 12, 13, /* DW_CFA_def_cfa sp, ... */
3214 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
3215 (FRAME_SIZE >> 7)
3216 },
3217 .fde_reg_ofs = {
3218 /* The following must match the stmdb in the prologue. */
3219 0x8e, 1, /* DW_CFA_offset, lr, -4 */
3220 0x8b, 2, /* DW_CFA_offset, r11, -8 */
3221 0x8a, 3, /* DW_CFA_offset, r10, -12 */
3222 0x89, 4, /* DW_CFA_offset, r9, -16 */
3223 0x88, 5, /* DW_CFA_offset, r8, -20 */
3224 0x87, 6, /* DW_CFA_offset, r7, -24 */
3225 0x86, 7, /* DW_CFA_offset, r6, -28 */
3226 0x85, 8, /* DW_CFA_offset, r5, -32 */
3227 0x84, 9, /* DW_CFA_offset, r4, -36 */
3228 }
3229 };
3230
3231 void tcg_register_jit(const void *buf, size_t buf_size)
3232 {
3233 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
3234 }