4a136e0a CF |
1 | /* |
2 | * Initial TCG Implementation for aarch64 | |
3 | * | |
4 | * Copyright (c) 2013 Huawei Technologies Duesseldorf GmbH | |
5 | * Written by Claudio Fontana | |
6 | * | |
7 | * This work is licensed under the terms of the GNU GPL, version 2 or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * See the COPYING file in the top-level directory for details. | |
11 | */ | |
12 | ||
9ecefc84 | 13 | #include "tcg-be-ldst.h" |
4a136e0a CF |
14 | #include "qemu/bitops.h" |
15 | ||
7763ffa0 RH |
16 | /* We're going to re-use TCGType when setting the SF bit, which controls
17 | the size of the operation performed. If we know the values match, it |
18 | makes things much cleaner. */ |
19 | QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); | |
20 | ||
4a136e0a CF |
21 | #ifndef NDEBUG |
22 | static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { | |
23 | "%x0", "%x1", "%x2", "%x3", "%x4", "%x5", "%x6", "%x7", | |
24 | "%x8", "%x9", "%x10", "%x11", "%x12", "%x13", "%x14", "%x15", | |
25 | "%x16", "%x17", "%x18", "%x19", "%x20", "%x21", "%x22", "%x23", | |
d82b78e4 | 26 | "%x24", "%x25", "%x26", "%x27", "%x28", "%fp", "%x30", "%sp", |
4a136e0a CF |
27 | }; |
28 | #endif /* NDEBUG */ | |
29 | ||
30 | static const int tcg_target_reg_alloc_order[] = { | |
31 | TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, | |
32 | TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, | |
6a91c7c9 | 33 | TCG_REG_X28, /* we will reserve this for GUEST_BASE if configured */ |
4a136e0a | 34 | |
d82b78e4 RH |
35 | TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, |
36 | TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, | |
4a136e0a CF |
37 | TCG_REG_X16, TCG_REG_X17, |
38 | ||
4a136e0a CF |
39 | TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, |
40 | TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, | |
41 | ||
d82b78e4 RH |
42 | /* X18 reserved by system */ |
43 | /* X19 reserved for AREG0 */ | |
44 | /* X29 reserved as fp */ | |
45 | /* X30 reserved as temporary */ | |
4a136e0a CF |
46 | }; |
47 | ||
48 | static const int tcg_target_call_iarg_regs[8] = { | |
49 | TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, | |
50 | TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7 | |
51 | }; | |
52 | static const int tcg_target_call_oarg_regs[1] = { | |
53 | TCG_REG_X0 | |
54 | }; | |
55 | ||
d82b78e4 | 56 | #define TCG_REG_TMP TCG_REG_X30 |
4a136e0a | 57 | |
6a91c7c9 | 58 | #ifndef CONFIG_SOFTMMU |
d82b78e4 RH |
59 | # ifdef CONFIG_USE_GUEST_BASE |
60 | # define TCG_REG_GUEST_BASE TCG_REG_X28 | |
6a91c7c9 | 61 | # else |
d82b78e4 | 62 | # define TCG_REG_GUEST_BASE TCG_REG_XZR |
6a91c7c9 JK |
63 | # endif |
64 | #endif | |
65 | ||
8587c30c | 66 | static inline void reloc_pc26(tcg_insn_unit *code_ptr, tcg_insn_unit *target) |
4a136e0a | 67 | { |
8587c30c RH |
68 | ptrdiff_t offset = target - code_ptr; |
69 | assert(offset == sextract64(offset, 0, 26)); | |
4a136e0a CF |
70 | /* read instruction, mask away previous PC_REL26 parameter contents, |
71 | set the proper offset, then write back the instruction. */ | |
8587c30c | 72 | *code_ptr = deposit32(*code_ptr, 0, 26, offset); |
4a136e0a CF |
73 | } |
74 | ||
8587c30c | 75 | static inline void reloc_pc19(tcg_insn_unit *code_ptr, tcg_insn_unit *target) |
4a136e0a | 76 | { |
8587c30c RH |
77 | ptrdiff_t offset = target - code_ptr; |
78 | assert(offset == sextract64(offset, 0, 19)); | |
79 | *code_ptr = deposit32(*code_ptr, 5, 19, offset); | |
4a136e0a CF |
80 | } |
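/* Worked example (illustrative, not part of the original source): tcg_insn_unit
   is 32 bits wide on this host, so OFFSET above is counted in instructions,
   which is exactly what the branch immediates expect.  Patching a plain
   B (I3206_B == 0x14000000) to jump 8 insns forward gives
       deposit32(0x14000000, 0, 26, 8) == 0x14000008
   and the CPU scales the imm26 field by 4 to a byte offset of 32.  The
   conditional form above does the same with a 19-bit field at bit 5.  */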
81 | ||
8587c30c | 82 | static inline void patch_reloc(tcg_insn_unit *code_ptr, int type, |
2ba7fae2 | 83 | intptr_t value, intptr_t addend) |
4a136e0a | 84 | { |
8587c30c | 85 | assert(addend == 0); |
4a136e0a CF |
86 | switch (type) { |
87 | case R_AARCH64_JUMP26: | |
88 | case R_AARCH64_CALL26: | |
8587c30c | 89 | reloc_pc26(code_ptr, (tcg_insn_unit *)value); |
4a136e0a CF |
90 | break; |
91 | case R_AARCH64_CONDBR19: | |
8587c30c | 92 | reloc_pc19(code_ptr, (tcg_insn_unit *)value); |
4a136e0a | 93 | break; |
4a136e0a CF |
94 | default: |
95 | tcg_abort(); | |
96 | } | |
97 | } | |
98 | ||
170bf931 RH |
99 | #define TCG_CT_CONST_AIMM 0x100 |
100 | #define TCG_CT_CONST_LIMM 0x200 | |
101 | #define TCG_CT_CONST_ZERO 0x400 | |
102 | #define TCG_CT_CONST_MONE 0x800 | |
90f1cd91 | 103 | |
4a136e0a CF |
104 | /* parse target specific constraints */ |
105 | static int target_parse_constraint(TCGArgConstraint *ct, | |
106 | const char **pct_str) | |
107 | { | |
108 | const char *ct_str = *pct_str; | |
109 | ||
110 | switch (ct_str[0]) { | |
111 | case 'r': | |
112 | ct->ct |= TCG_CT_REG; | |
113 | tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); | |
114 | break; | |
115 | case 'l': /* qemu_ld / qemu_st address, data_reg */ | |
116 | ct->ct |= TCG_CT_REG; | |
117 | tcg_regset_set32(ct->u.regs, 0, (1ULL << TCG_TARGET_NB_REGS) - 1); | |
118 | #ifdef CONFIG_SOFTMMU | |
119 | /* x0 and x1 will be overwritten when reading the tlb entry, |
120 | and x2 and x3 are used for the helper arguments; better to avoid using them. */ |
121 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_X0); | |
122 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_X1); | |
123 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_X2); | |
124 | tcg_regset_reset_reg(ct->u.regs, TCG_REG_X3); | |
125 | #endif | |
126 | break; | |
90f1cd91 RH |
127 | case 'A': /* Valid for arithmetic immediate (positive or negative). */ |
128 | ct->ct |= TCG_CT_CONST_AIMM; | |
129 | break; | |
e029f293 RH |
130 | case 'L': /* Valid for logical immediate. */ |
131 | ct->ct |= TCG_CT_CONST_LIMM; | |
132 | break; | |
c6e929e7 RH |
133 | case 'M': /* minus one */ |
134 | ct->ct |= TCG_CT_CONST_MONE; | |
135 | break; | |
04ce397b RH |
136 | case 'Z': /* zero */ |
137 | ct->ct |= TCG_CT_CONST_ZERO; | |
138 | break; | |
4a136e0a CF |
139 | default: |
140 | return -1; | |
141 | } | |
142 | ||
143 | ct_str++; | |
144 | *pct_str = ct_str; | |
145 | return 0; | |
146 | } | |
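/* For illustration: a constraint string such as "rA" is consumed one letter
   per call of the function above, first ORing TCG_CT_REG (and the full
   register set) and then TCG_CT_CONST_AIMM into ct->ct, i.e. "any register,
   or a constant accepted by is_aimm() below".  Likewise "rZ" allows a
   register or the constant zero, which tcg_out_op later maps onto XZR via
   its REG0 macro.  */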
147 | ||
90f1cd91 RH |
148 | static inline bool is_aimm(uint64_t val) |
149 | { | |
150 | return (val & ~0xfff) == 0 || (val & ~0xfff000) == 0; | |
151 | } | |
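/* E.g. 0xabc and 0xabc000 are valid arithmetic immediates (12 bits,
   optionally shifted left by 12), while 0xabc00 is rejected because its set
   bits straddle the two possible positions.  tcg_out_insn_3401 below
   re-derives the LSL-12 form from the raw value.  */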
152 | ||
e029f293 RH |
153 | static inline bool is_limm(uint64_t val) |
154 | { | |
155 | /* Taking a simplified view of the logical immediates for now, ignoring | |
156 | the replication that can happen across the field. Match bit patterns | |
157 | of the forms | |
158 | 0....01....1 | |
159 | 0..01..10..0 | |
160 | and their inverses. */ | |
161 | ||
162 | /* Make things easier below, by testing the form with msb clear. */ | |
163 | if ((int64_t)val < 0) { | |
164 | val = ~val; | |
165 | } | |
166 | if (val == 0) { | |
167 | return false; | |
168 | } | |
169 | val += val & -val; | |
170 | return (val & (val - 1)) == 0; | |
171 | } | |
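/* Worked example of the test above: for val = 0x0ff0, val & -val isolates
   the lowest set bit (0x010); adding it collapses the single run of ones
   into 0x1000, a power of two, so the final test passes.  For val = 0x1010
   (two separate runs) the sum is 0x1020, which is not a power of two, so
   the value is correctly rejected.  */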
172 | ||
f6c6afc1 | 173 | static int tcg_target_const_match(tcg_target_long val, TCGType type, |
90f1cd91 | 174 | const TCGArgConstraint *arg_ct) |
4a136e0a CF |
175 | { |
176 | int ct = arg_ct->ct; | |
177 | ||
178 | if (ct & TCG_CT_CONST) { | |
179 | return 1; | |
180 | } | |
170bf931 | 181 | if (type == TCG_TYPE_I32) { |
90f1cd91 RH |
182 | val = (int32_t)val; |
183 | } | |
184 | if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) { | |
185 | return 1; | |
186 | } | |
e029f293 RH |
187 | if ((ct & TCG_CT_CONST_LIMM) && is_limm(val)) { |
188 | return 1; | |
189 | } | |
04ce397b RH |
190 | if ((ct & TCG_CT_CONST_ZERO) && val == 0) { |
191 | return 1; | |
192 | } | |
c6e929e7 RH |
193 | if ((ct & TCG_CT_CONST_MONE) && val == -1) { |
194 | return 1; | |
195 | } | |
4a136e0a CF |
196 | |
197 | return 0; | |
198 | } | |
199 | ||
200 | enum aarch64_cond_code { | |
201 | COND_EQ = 0x0, | |
202 | COND_NE = 0x1, | |
203 | COND_CS = 0x2, /* Unsigned greater or equal */ | |
204 | COND_HS = COND_CS, /* ALIAS greater or equal */ | |
205 | COND_CC = 0x3, /* Unsigned less than */ | |
206 | COND_LO = COND_CC, /* ALIAS Lower */ | |
207 | COND_MI = 0x4, /* Negative */ | |
208 | COND_PL = 0x5, /* Zero or greater */ | |
209 | COND_VS = 0x6, /* Overflow */ | |
210 | COND_VC = 0x7, /* No overflow */ | |
211 | COND_HI = 0x8, /* Unsigned greater than */ | |
212 | COND_LS = 0x9, /* Unsigned less or equal */ | |
213 | COND_GE = 0xa, | |
214 | COND_LT = 0xb, | |
215 | COND_GT = 0xc, | |
216 | COND_LE = 0xd, | |
217 | COND_AL = 0xe, | |
218 | COND_NV = 0xf, /* behaves like COND_AL here */ | |
219 | }; | |
220 | ||
221 | static const enum aarch64_cond_code tcg_cond_to_aarch64[] = { | |
222 | [TCG_COND_EQ] = COND_EQ, | |
223 | [TCG_COND_NE] = COND_NE, | |
224 | [TCG_COND_LT] = COND_LT, | |
225 | [TCG_COND_GE] = COND_GE, | |
226 | [TCG_COND_LE] = COND_LE, | |
227 | [TCG_COND_GT] = COND_GT, | |
228 | /* unsigned */ | |
229 | [TCG_COND_LTU] = COND_LO, | |
230 | [TCG_COND_GTU] = COND_HI, | |
231 | [TCG_COND_GEU] = COND_HS, | |
232 | [TCG_COND_LEU] = COND_LS, | |
233 | }; | |
234 | ||
3d4299f4 RH |
235 | typedef enum { |
236 | LDST_ST = 0, /* store */ | |
237 | LDST_LD = 1, /* load */ | |
238 | LDST_LD_S_X = 2, /* load and sign-extend into Xt */ | |
239 | LDST_LD_S_W = 3, /* load and sign-extend into Wt */ | |
240 | } AArch64LdstType; | |
4a136e0a | 241 | |
50573c66 RH |
242 | /* We encode the format of the insn into the beginning of the name, so that |
243 | we can have the preprocessor help "typecheck" the insn vs the output | |
244 | function. Arm didn't provide us with nice names for the formats, so we | |
245 | use the section number of the architecture reference manual in which the | |
246 | instruction group is described. */ | |
247 | typedef enum { | |
3d9e69a2 RH |
248 | /* Compare and branch (immediate). */ |
249 | I3201_CBZ = 0x34000000, | |
250 | I3201_CBNZ = 0x35000000, | |
251 | ||
81d8a5ee RH |
252 | /* Conditional branch (immediate). */ |
253 | I3202_B_C = 0x54000000, | |
254 | ||
255 | /* Unconditional branch (immediate). */ | |
256 | I3206_B = 0x14000000, | |
257 | I3206_BL = 0x94000000, | |
258 | ||
259 | /* Unconditional branch (register). */ | |
260 | I3207_BR = 0xd61f0000, | |
261 | I3207_BLR = 0xd63f0000, | |
262 | I3207_RET = 0xd65f0000, | |
263 | ||
3d4299f4 RH |
264 | /* Load/store register. Described here as 3.3.12, but the helper |
265 | that emits them can transform to 3.3.10 or 3.3.13. */ | |
266 | I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30, | |
267 | I3312_STRH = 0x38000000 | LDST_ST << 22 | MO_16 << 30, | |
268 | I3312_STRW = 0x38000000 | LDST_ST << 22 | MO_32 << 30, | |
269 | I3312_STRX = 0x38000000 | LDST_ST << 22 | MO_64 << 30, | |
270 | ||
271 | I3312_LDRB = 0x38000000 | LDST_LD << 22 | MO_8 << 30, | |
272 | I3312_LDRH = 0x38000000 | LDST_LD << 22 | MO_16 << 30, | |
273 | I3312_LDRW = 0x38000000 | LDST_LD << 22 | MO_32 << 30, | |
274 | I3312_LDRX = 0x38000000 | LDST_LD << 22 | MO_64 << 30, | |
275 | ||
276 | I3312_LDRSBW = 0x38000000 | LDST_LD_S_W << 22 | MO_8 << 30, | |
277 | I3312_LDRSHW = 0x38000000 | LDST_LD_S_W << 22 | MO_16 << 30, | |
278 | ||
279 | I3312_LDRSBX = 0x38000000 | LDST_LD_S_X << 22 | MO_8 << 30, | |
280 | I3312_LDRSHX = 0x38000000 | LDST_LD_S_X << 22 | MO_16 << 30, | |
281 | I3312_LDRSWX = 0x38000000 | LDST_LD_S_X << 22 | MO_32 << 30, | |
282 | ||
283 | I3312_TO_I3310 = 0x00206800, | |
284 | I3312_TO_I3313 = 0x01000000, | |
285 | ||
95f72aa9 RH |
286 | /* Load/store register pair instructions. */ |
287 | I3314_LDP = 0x28400000, | |
288 | I3314_STP = 0x28000000, | |
289 | ||
096c46c0 RH |
290 | /* Add/subtract immediate instructions. */ |
291 | I3401_ADDI = 0x11000000, | |
292 | I3401_ADDSI = 0x31000000, | |
293 | I3401_SUBI = 0x51000000, | |
294 | I3401_SUBSI = 0x71000000, | |
295 | ||
b3c56df7 RH |
296 | /* Bitfield instructions. */ |
297 | I3402_BFM = 0x33000000, | |
298 | I3402_SBFM = 0x13000000, | |
299 | I3402_UBFM = 0x53000000, | |
300 | ||
301 | /* Extract instruction. */ | |
302 | I3403_EXTR = 0x13800000, | |
303 | ||
e029f293 RH |
304 | /* Logical immediate instructions. */ |
305 | I3404_ANDI = 0x12000000, | |
306 | I3404_ORRI = 0x32000000, | |
307 | I3404_EORI = 0x52000000, | |
308 | ||
582ab779 RH |
309 | /* Move wide immediate instructions. */ |
310 | I3405_MOVN = 0x12800000, | |
311 | I3405_MOVZ = 0x52800000, | |
312 | I3405_MOVK = 0x72800000, | |
313 | ||
c6e310d9 RH |
314 | /* PC relative addressing instructions. */ |
315 | I3406_ADR = 0x10000000, | |
316 | I3406_ADRP = 0x90000000, | |
317 | ||
50573c66 RH |
318 | /* Add/subtract shifted register instructions (without a shift). */ |
319 | I3502_ADD = 0x0b000000, | |
320 | I3502_ADDS = 0x2b000000, | |
321 | I3502_SUB = 0x4b000000, | |
322 | I3502_SUBS = 0x6b000000, | |
323 | ||
324 | /* Add/subtract shifted register instructions (with a shift). */ | |
325 | I3502S_ADD_LSL = I3502_ADD, | |
326 | ||
c6e929e7 RH |
327 | /* Add/subtract with carry instructions. */ |
328 | I3503_ADC = 0x1a000000, | |
329 | I3503_SBC = 0x5a000000, | |
330 | ||
04ce397b RH |
331 | /* Conditional select instructions. */ |
332 | I3506_CSEL = 0x1a800000, | |
333 | I3506_CSINC = 0x1a800400, | |
334 | ||
edd8824c RH |
335 | /* Data-processing (1 source) instructions. */ |
336 | I3507_REV16 = 0x5ac00400, | |
337 | I3507_REV32 = 0x5ac00800, | |
338 | I3507_REV64 = 0x5ac00c00, | |
339 | ||
df9351e3 RH |
340 | /* Data-processing (2 source) instructions. */ |
341 | I3508_LSLV = 0x1ac02000, | |
342 | I3508_LSRV = 0x1ac02400, | |
343 | I3508_ASRV = 0x1ac02800, | |
344 | I3508_RORV = 0x1ac02c00, | |
1fcc9ddf RH |
345 | I3508_SMULH = 0x9b407c00, |
346 | I3508_UMULH = 0x9bc07c00, | |
8678b71c RH |
347 | I3508_UDIV = 0x1ac00800, |
348 | I3508_SDIV = 0x1ac00c00, | |
349 | ||
350 | /* Data-processing (3 source) instructions. */ | |
351 | I3509_MADD = 0x1b000000, | |
352 | I3509_MSUB = 0x1b008000, | |
df9351e3 | 353 | |
50573c66 RH |
354 | /* Logical shifted register instructions (without a shift). */ |
355 | I3510_AND = 0x0a000000, | |
14b155dd | 356 | I3510_BIC = 0x0a200000, |
50573c66 | 357 | I3510_ORR = 0x2a000000, |
14b155dd | 358 | I3510_ORN = 0x2a200000, |
50573c66 | 359 | I3510_EOR = 0x4a000000, |
14b155dd | 360 | I3510_EON = 0x4a200000, |
50573c66 RH |
361 | I3510_ANDS = 0x6a000000, |
362 | } AArch64Insn; | |
4a136e0a | 363 | |
4a136e0a CF |
364 | static inline uint32_t tcg_in32(TCGContext *s) |
365 | { | |
366 | uint32_t v = *(uint32_t *)s->code_ptr; | |
367 | return v; | |
368 | } | |
369 | ||
50573c66 RH |
370 | /* Emit an opcode with "type-checking" of the format. */ |
371 | #define tcg_out_insn(S, FMT, OP, ...) \ | |
372 | glue(tcg_out_insn_,FMT)(S, glue(glue(glue(I,FMT),_),OP), ## __VA_ARGS__) | |
373 | ||
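/* For example, tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm) expands to
   tcg_out_insn_3401(s, I3401_ADDI, ext, rd, rn, aimm).  Pairing a format
   with an opcode that belongs to a different format references a
   non-existent I<fmt>_<op> enumerator and fails to compile, which is the
   "type-checking" referred to above.  */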
3d9e69a2 RH |
374 | static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext, |
375 | TCGReg rt, int imm19) | |
376 | { | |
377 | tcg_out32(s, insn | ext << 31 | (imm19 & 0x7ffff) << 5 | rt); | |
378 | } | |
379 | ||
81d8a5ee RH |
380 | static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn, |
381 | TCGCond c, int imm19) | |
382 | { | |
383 | tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5); | |
384 | } | |
385 | ||
386 | static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26) | |
387 | { | |
388 | tcg_out32(s, insn | (imm26 & 0x03ffffff)); | |
389 | } | |
390 | ||
391 | static void tcg_out_insn_3207(TCGContext *s, AArch64Insn insn, TCGReg rn) | |
392 | { | |
393 | tcg_out32(s, insn | rn << 5); | |
394 | } | |
395 | ||
95f72aa9 RH |
396 | static void tcg_out_insn_3314(TCGContext *s, AArch64Insn insn, |
397 | TCGReg r1, TCGReg r2, TCGReg rn, | |
398 | tcg_target_long ofs, bool pre, bool w) | |
399 | { | |
400 | insn |= 1u << 31; /* ext */ | |
401 | insn |= pre << 24; | |
402 | insn |= w << 23; | |
403 | ||
404 | assert(ofs >= -0x200 && ofs < 0x200 && (ofs & 7) == 0); | |
405 | insn |= (ofs & (0x7f << 3)) << (15 - 3); | |
406 | ||
407 | tcg_out32(s, insn | r2 << 10 | rn << 5 | r1); | |
408 | } | |
409 | ||
096c46c0 RH |
410 | static void tcg_out_insn_3401(TCGContext *s, AArch64Insn insn, TCGType ext, |
411 | TCGReg rd, TCGReg rn, uint64_t aimm) | |
412 | { | |
413 | if (aimm > 0xfff) { | |
414 | assert((aimm & 0xfff) == 0); | |
415 | aimm >>= 12; | |
416 | assert(aimm <= 0xfff); | |
417 | aimm |= 1 << 12; /* apply LSL 12 */ | |
418 | } | |
419 | tcg_out32(s, insn | ext << 31 | aimm << 10 | rn << 5 | rd); | |
420 | } | |
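/* Illustrative case: aimm = 0x3000 does not fit in 12 bits, so the code
   above re-encodes it as 3 with the LSL-12 flag (bit 12 of the field, which
   lands in bit 22 of the instruction), e.g. ADD Xd, Xn, #3, LSL #12.  A
   value such as 0x1001 trips the assert; callers only pass values accepted
   by is_aimm() or otherwise known to fit.  */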
421 | ||
e029f293 RH |
422 | /* This function can be used for both 3.4.2 (Bitfield) and 3.4.4 |
423 | (Logical immediate). Both insn groups have N, IMMR and IMMS fields | |
424 | that feed the DecodeBitMasks pseudo function. */ | |
425 | static void tcg_out_insn_3402(TCGContext *s, AArch64Insn insn, TCGType ext, | |
426 | TCGReg rd, TCGReg rn, int n, int immr, int imms) | |
427 | { | |
428 | tcg_out32(s, insn | ext << 31 | n << 22 | immr << 16 | imms << 10 | |
429 | | rn << 5 | rd); | |
430 | } | |
431 | ||
432 | #define tcg_out_insn_3404 tcg_out_insn_3402 | |
433 | ||
b3c56df7 RH |
434 | static void tcg_out_insn_3403(TCGContext *s, AArch64Insn insn, TCGType ext, |
435 | TCGReg rd, TCGReg rn, TCGReg rm, int imms) | |
436 | { | |
437 | tcg_out32(s, insn | ext << 31 | ext << 22 | rm << 16 | imms << 10 | |
438 | | rn << 5 | rd); | |
439 | } | |
440 | ||
582ab779 RH |
441 | /* This function is used for the Move (wide immediate) instruction group. |
442 | Note that SHIFT is a full shift count, not the 2-bit HW field. */ |
443 | static void tcg_out_insn_3405(TCGContext *s, AArch64Insn insn, TCGType ext, | |
444 | TCGReg rd, uint16_t half, unsigned shift) | |
445 | { | |
446 | assert((shift & ~0x30) == 0); | |
447 | tcg_out32(s, insn | ext << 31 | shift << (21 - 4) | half << 5 | rd); | |
448 | } | |
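/* E.g. tcg_out_insn(s, 3405, MOVZ, TCG_TYPE_I64, rd, 0x1234, 16) emits
   MOVZ Xd, #0x1234, LSL #16: the full shift count of 16 is encoded as
   hw = 1, because shift << (21 - 4) places shift / 16 in the 2-bit hw
   field at bit 21.  */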
449 | ||
c6e310d9 RH |
450 | static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn, |
451 | TCGReg rd, int64_t disp) | |
452 | { | |
453 | tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd); | |
454 | } | |
455 | ||
50573c66 RH |
456 | /* This function is for 3.5.2 (Add/subtract shifted register), for
457 | the rare occasion when we actually want to supply a shift amount. */ | |
458 | static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn, | |
459 | TCGType ext, TCGReg rd, TCGReg rn, | |
460 | TCGReg rm, int imm6) | |
461 | { | |
462 | tcg_out32(s, insn | ext << 31 | rm << 16 | imm6 << 10 | rn << 5 | rd); | |
463 | } | |
464 | ||
465 | /* This function is for 3.5.2 (Add/subtract shifted register), | |
466 | and 3.5.10 (Logical shifted register), for the vast majority of cases |
467 | when we don't want to apply a shift. Thus it can also be used for | |
468 | 3.5.3 (Add/subtract with carry) and 3.5.8 (Data processing 2 source). */ | |
469 | static void tcg_out_insn_3502(TCGContext *s, AArch64Insn insn, TCGType ext, | |
470 | TCGReg rd, TCGReg rn, TCGReg rm) | |
471 | { | |
472 | tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd); | |
473 | } | |
474 | ||
475 | #define tcg_out_insn_3503 tcg_out_insn_3502 | |
476 | #define tcg_out_insn_3508 tcg_out_insn_3502 | |
477 | #define tcg_out_insn_3510 tcg_out_insn_3502 | |
478 | ||
04ce397b RH |
479 | static void tcg_out_insn_3506(TCGContext *s, AArch64Insn insn, TCGType ext, |
480 | TCGReg rd, TCGReg rn, TCGReg rm, TCGCond c) | |
481 | { | |
482 | tcg_out32(s, insn | ext << 31 | rm << 16 | rn << 5 | rd | |
483 | | tcg_cond_to_aarch64[c] << 12); | |
484 | } | |
485 | ||
edd8824c RH |
486 | static void tcg_out_insn_3507(TCGContext *s, AArch64Insn insn, TCGType ext, |
487 | TCGReg rd, TCGReg rn) | |
488 | { | |
489 | tcg_out32(s, insn | ext << 31 | rn << 5 | rd); | |
490 | } | |
491 | ||
8678b71c RH |
492 | static void tcg_out_insn_3509(TCGContext *s, AArch64Insn insn, TCGType ext, |
493 | TCGReg rd, TCGReg rn, TCGReg rm, TCGReg ra) | |
494 | { | |
495 | tcg_out32(s, insn | ext << 31 | rm << 16 | ra << 10 | rn << 5 | rd); | |
496 | } | |
497 | ||
3d4299f4 RH |
498 | static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn, |
499 | TCGReg rd, TCGReg base, TCGReg regoff) | |
500 | { | |
501 | /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ | |
502 | tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 | base << 5 | rd); | |
503 | } | |
50573c66 | 504 | |
3d4299f4 RH |
505 | |
506 | static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn, | |
507 | TCGReg rd, TCGReg rn, intptr_t offset) | |
4a136e0a | 508 | { |
3d4299f4 | 509 | tcg_out32(s, insn | (offset & 0x1ff) << 12 | rn << 5 | rd); |
4a136e0a CF |
510 | } |
511 | ||
3d4299f4 RH |
512 | static void tcg_out_insn_3313(TCGContext *s, AArch64Insn insn, |
513 | TCGReg rd, TCGReg rn, uintptr_t scaled_uimm) | |
b1f6dc0d | 514 | { |
3d4299f4 RH |
515 | /* Note the AArch64Insn constants above are for C3.3.12. Adjust. */ |
516 | tcg_out32(s, insn | I3312_TO_I3313 | scaled_uimm << 10 | rn << 5 | rd); | |
b1f6dc0d CF |
517 | } |
518 | ||
7d11fc7c RH |
519 | /* Register to register move using ORR (shifted register with no shift). */ |
520 | static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rm) | |
4a136e0a | 521 | { |
7d11fc7c RH |
522 | tcg_out_insn(s, 3510, ORR, ext, rd, TCG_REG_XZR, rm); |
523 | } | |
524 | ||
525 | /* Register to register move using ADDI (move to/from SP). */ | |
526 | static void tcg_out_movr_sp(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) | |
527 | { | |
528 | tcg_out_insn(s, 3401, ADDI, ext, rd, rn, 0); | |
4a136e0a CF |
529 | } |
530 | ||
4ec4f0bd RH |
531 | /* This function is used for the Logical (immediate) instruction group. |
532 | The value of LIMM must satisfy IS_LIMM. See the comment above about | |
533 | only supporting simplified logical immediates. */ | |
534 | static void tcg_out_logicali(TCGContext *s, AArch64Insn insn, TCGType ext, | |
535 | TCGReg rd, TCGReg rn, uint64_t limm) | |
536 | { | |
537 | unsigned h, l, r, c; | |
538 | ||
539 | assert(is_limm(limm)); | |
540 | ||
541 | h = clz64(limm); | |
542 | l = ctz64(limm); | |
543 | if (l == 0) { | |
544 | r = 0; /* form 0....01....1 */ | |
545 | c = ctz64(~limm) - 1; | |
546 | if (h == 0) { | |
547 | r = clz64(~limm); /* form 1..10..01..1 */ | |
548 | c += r; | |
549 | } | |
550 | } else { | |
551 | r = 64 - l; /* form 1....10....0 or 0..01..10..0 */ | |
552 | c = r - h - 1; | |
553 | } | |
554 | if (ext == TCG_TYPE_I32) { | |
555 | r &= 31; | |
556 | c &= 31; | |
557 | } | |
558 | ||
559 | tcg_out_insn_3404(s, insn, ext, rd, rn, ext, r, c); | |
560 | } | |
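/* Worked example: limm = 0x0ff0 (form 0..01..10..0).  Here h = clz64 = 52
   and l = ctz64 = 4, so the else branch yields r = 60 and c = 7.  The
   resulting fields (N = 1, immr = 60, imms = 7) describe "a run of 8 ones,
   rotated right by 60", which DecodeBitMasks expands back to 0x0ff0.  */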
561 | ||
582ab779 RH |
562 | static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, |
563 | tcg_target_long value) | |
4a136e0a | 564 | { |
582ab779 | 565 | AArch64Insn insn; |
dfeb5fe7 RH |
566 | int i, wantinv, shift; |
567 | tcg_target_long svalue = value; | |
568 | tcg_target_long ivalue = ~value; | |
569 | tcg_target_long imask; | |
570 | ||
571 | /* For 32-bit values, discard potential garbage in value. For 64-bit | |
572 | values within [2**31, 2**32-1], we can create smaller sequences by | |
573 | interpreting this as a negative 32-bit number, while ensuring that | |
574 | the high 32 bits are cleared by setting SF=0. */ | |
575 | if (type == TCG_TYPE_I32 || (value & ~0xffffffffull) == 0) { | |
576 | svalue = (int32_t)value; | |
582ab779 | 577 | value = (uint32_t)value; |
dfeb5fe7 RH |
578 | ivalue = (uint32_t)ivalue; |
579 | type = TCG_TYPE_I32; | |
580 | } | |
581 | ||
d8918df5 RH |
582 | /* Speed things up by handling the common case of small positive |
583 | and negative values specially. */ | |
584 | if ((value & ~0xffffull) == 0) { | |
585 | tcg_out_insn(s, 3405, MOVZ, type, rd, value, 0); | |
586 | return; | |
587 | } else if ((ivalue & ~0xffffull) == 0) { | |
588 | tcg_out_insn(s, 3405, MOVN, type, rd, ivalue, 0); | |
589 | return; | |
590 | } | |
591 | ||
4ec4f0bd RH |
592 | /* Check for logical immediates. For the benefit of 32-bit quantities, |
593 | use the sign-extended value. That lets us match rotated values such | |
594 | as 0xff0000ff with the same 64-bit logic matching 0xffffffffff0000ff. */ | |
595 | if (is_limm(svalue)) { | |
596 | tcg_out_logicali(s, I3404_ORRI, type, rd, TCG_REG_XZR, svalue); | |
597 | return; | |
598 | } | |
599 | ||
c6e310d9 RH |
600 | /* Look for host pointer values within 4G of the PC. This happens |
601 | often when loading pointers to QEMU's own data structures. */ | |
602 | if (type == TCG_TYPE_I64) { | |
603 | tcg_target_long disp = (value >> 12) - ((intptr_t)s->code_ptr >> 12); | |
604 | if (disp == sextract64(disp, 0, 21)) { | |
605 | tcg_out_insn(s, 3406, ADRP, rd, disp); | |
606 | if (value & 0xfff) { | |
607 | tcg_out_insn(s, 3401, ADDI, type, rd, rd, value & 0xfff); | |
608 | } | |
609 | return; | |
610 | } | |
611 | } | |
612 | ||
dfeb5fe7 RH |
613 | /* Would it take fewer insns to begin with MOVN? For the value and its |
614 | inverse, count the number of 16-bit lanes that are 0. */ | |
615 | for (i = wantinv = imask = 0; i < 64; i += 16) { | |
616 | tcg_target_long mask = 0xffffull << i; | |
617 | if ((value & mask) == 0) { | |
618 | wantinv -= 1; | |
619 | } | |
620 | if ((ivalue & mask) == 0) { | |
621 | wantinv += 1; | |
622 | imask |= mask; | |
623 | } | |
582ab779 RH |
624 | } |
625 | ||
dfeb5fe7 | 626 | /* If we had more 0xffff than 0x0000, invert VALUE and use MOVN. */ |
582ab779 | 627 | insn = I3405_MOVZ; |
dfeb5fe7 RH |
628 | if (wantinv > 0) { |
629 | value = ivalue; | |
630 | insn = I3405_MOVN; | |
631 | } | |
632 | ||
633 | /* Find the lowest lane that is not 0x0000. */ | |
634 | shift = ctz64(value) & (63 & -16); | |
635 | tcg_out_insn_3405(s, insn, type, rd, value >> shift, shift); | |
636 | ||
637 | if (wantinv > 0) { | |
638 | /* Re-invert the value, so MOVK sees non-inverted bits. */ | |
639 | value = ~value; | |
640 | /* Clear out all the 0xffff lanes. */ | |
641 | value ^= imask; | |
642 | } | |
643 | /* Clear out the lane that we just set. */ | |
644 | value &= ~(0xffffUL << shift); | |
645 | ||
646 | /* Iterate until all lanes have been set, and thus cleared from VALUE. */ | |
647 | while (value) { | |
648 | shift = ctz64(value) & (63 & -16); | |
649 | tcg_out_insn(s, 3405, MOVK, type, rd, value >> shift, shift); | |
4a136e0a | 650 | value &= ~(0xffffUL << shift); |
dfeb5fe7 | 651 | } |
4a136e0a CF |
652 | } |
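/* Worked example (assuming the value is not within ADRP range of the code):
   value = 0x1234000056780000 has two all-zero lanes and no all-ones lanes,
   so wantinv stays negative and the MOVZ branch is taken, emitting
       MOVZ rd, #0x5678, LSL #16
       MOVK rd, #0x1234, LSL #48
   while the zero lanes are never touched.  A mostly-ones value such as
   0xffffffffffff1234 is already caught by the early MOVN case above and
   costs a single instruction.  */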
653 | ||
3d4299f4 RH |
654 | /* Define something more legible for general use. */ |
655 | #define tcg_out_ldst_r tcg_out_insn_3310 | |
4a136e0a | 656 | |
3d4299f4 RH |
657 | static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, |
658 | TCGReg rd, TCGReg rn, intptr_t offset) | |
4a136e0a | 659 | { |
3d4299f4 RH |
660 | TCGMemOp size = (uint32_t)insn >> 30; |
661 | ||
3d4299f4 RH |
662 | /* If the offset is naturally aligned and in range, then we can |
663 | use the scaled uimm12 encoding */ | |
664 | if (offset >= 0 && !(offset & ((1 << size) - 1))) { | |
665 | uintptr_t scaled_uimm = offset >> size; | |
666 | if (scaled_uimm <= 0xfff) { | |
667 | tcg_out_insn_3313(s, insn, rd, rn, scaled_uimm); | |
668 | return; | |
b1f6dc0d CF |
669 | } |
670 | } | |
671 | ||
a056c9fa RH |
672 | /* Small signed offsets can use the unscaled encoding. */ |
673 | if (offset >= -256 && offset < 256) { | |
674 | tcg_out_insn_3312(s, insn, rd, rn, offset); | |
675 | return; | |
676 | } | |
677 | ||
3d4299f4 | 678 | /* Worst-case scenario, move offset to temp register, use reg offset. */ |
b1f6dc0d | 679 | tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset); |
3d4299f4 | 680 | tcg_out_ldst_r(s, insn, rd, rn, TCG_REG_TMP); |
4a136e0a CF |
681 | } |
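/* Example with I3312_LDRX (an 8-byte access): offset 0x7ff8 is aligned and
   scales to exactly 0xfff, so the scaled uimm12 form is used; offset -16
   falls back to the unscaled 9-bit form; offset 0x10000 fits neither, so it
   is materialised in TCG_REG_TMP and the register-offset form is used.  */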
682 | ||
4a136e0a CF |
683 | static inline void tcg_out_mov(TCGContext *s, |
684 | TCGType type, TCGReg ret, TCGReg arg) | |
685 | { | |
686 | if (ret != arg) { | |
929f8b55 | 687 | tcg_out_movr(s, type, ret, arg); |
4a136e0a CF |
688 | } |
689 | } | |
690 | ||
691 | static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, | |
a05b5b9b | 692 | TCGReg arg1, intptr_t arg2) |
4a136e0a | 693 | { |
3d4299f4 | 694 | tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_LDRW : I3312_LDRX, |
4a136e0a CF |
695 | arg, arg1, arg2); |
696 | } | |
697 | ||
698 | static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, | |
a05b5b9b | 699 | TCGReg arg1, intptr_t arg2) |
4a136e0a | 700 | { |
3d4299f4 | 701 | tcg_out_ldst(s, type == TCG_TYPE_I32 ? I3312_STRW : I3312_STRX, |
4a136e0a CF |
702 | arg, arg1, arg2); |
703 | } | |
704 | ||
b3c56df7 RH |
705 | static inline void tcg_out_bfm(TCGContext *s, TCGType ext, TCGReg rd, |
706 | TCGReg rn, unsigned int a, unsigned int b) | |
707 | { | |
708 | tcg_out_insn(s, 3402, BFM, ext, rd, rn, ext, a, b); | |
709 | } | |
710 | ||
7763ffa0 RH |
711 | static inline void tcg_out_ubfm(TCGContext *s, TCGType ext, TCGReg rd, |
712 | TCGReg rn, unsigned int a, unsigned int b) | |
4a136e0a | 713 | { |
b3c56df7 | 714 | tcg_out_insn(s, 3402, UBFM, ext, rd, rn, ext, a, b); |
4a136e0a CF |
715 | } |
716 | ||
7763ffa0 RH |
717 | static inline void tcg_out_sbfm(TCGContext *s, TCGType ext, TCGReg rd, |
718 | TCGReg rn, unsigned int a, unsigned int b) | |
4a136e0a | 719 | { |
b3c56df7 | 720 | tcg_out_insn(s, 3402, SBFM, ext, rd, rn, ext, a, b); |
4a136e0a CF |
721 | } |
722 | ||
7763ffa0 | 723 | static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, |
4a136e0a CF |
724 | TCGReg rn, TCGReg rm, unsigned int a) |
725 | { | |
b3c56df7 | 726 | tcg_out_insn(s, 3403, EXTR, ext, rd, rn, rm, a); |
4a136e0a CF |
727 | } |
728 | ||
7763ffa0 | 729 | static inline void tcg_out_shl(TCGContext *s, TCGType ext, |
4a136e0a CF |
730 | TCGReg rd, TCGReg rn, unsigned int m) |
731 | { | |
b3c56df7 RH |
732 | int bits = ext ? 64 : 32; |
733 | int max = bits - 1; | |
4a136e0a CF |
734 | tcg_out_ubfm(s, ext, rd, rn, bits - (m & max), max - (m & max)); |
735 | } | |
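/* E.g. a 64-bit shift left by 8 becomes UBFM Xd, Xn, #56, #55, which is
   precisely the encoding behind the LSL #8 alias; the helpers below use the
   analogous UBFM/SBFM/EXTR forms for LSR, ASR and ROR.  */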
736 | ||
7763ffa0 | 737 | static inline void tcg_out_shr(TCGContext *s, TCGType ext, |
4a136e0a CF |
738 | TCGReg rd, TCGReg rn, unsigned int m) |
739 | { | |
740 | int max = ext ? 63 : 31; | |
741 | tcg_out_ubfm(s, ext, rd, rn, m & max, max); | |
742 | } | |
743 | ||
7763ffa0 | 744 | static inline void tcg_out_sar(TCGContext *s, TCGType ext, |
4a136e0a CF |
745 | TCGReg rd, TCGReg rn, unsigned int m) |
746 | { | |
747 | int max = ext ? 63 : 31; | |
748 | tcg_out_sbfm(s, ext, rd, rn, m & max, max); | |
749 | } | |
750 | ||
7763ffa0 | 751 | static inline void tcg_out_rotr(TCGContext *s, TCGType ext, |
4a136e0a CF |
752 | TCGReg rd, TCGReg rn, unsigned int m) |
753 | { | |
754 | int max = ext ? 63 : 31; | |
755 | tcg_out_extr(s, ext, rd, rn, rn, m & max); | |
756 | } | |
757 | ||
7763ffa0 | 758 | static inline void tcg_out_rotl(TCGContext *s, TCGType ext, |
4a136e0a CF |
759 | TCGReg rd, TCGReg rn, unsigned int m) |
760 | { | |
b3c56df7 RH |
761 | int bits = ext ? 64 : 32; |
762 | int max = bits - 1; | |
4a136e0a CF |
763 | tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max)); |
764 | } | |
765 | ||
b3c56df7 RH |
766 | static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, |
767 | TCGReg rn, unsigned lsb, unsigned width) | |
768 | { | |
769 | unsigned size = ext ? 64 : 32; | |
770 | unsigned a = (size - lsb) & (size - 1); | |
771 | unsigned b = width - 1; | |
772 | tcg_out_bfm(s, ext, rd, rn, a, b); | |
773 | } | |
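/* E.g. depositing an 8-bit field at bit 8 of a 32-bit value gives a = 24
   and b = 7, i.e. BFM Wd, Wn, #24, #7, the encoding behind the alias
   BFI Wd, Wn, #8, #8.  */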
774 | ||
90f1cd91 RH |
775 | static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a, |
776 | tcg_target_long b, bool const_b) | |
4a136e0a | 777 | { |
90f1cd91 RH |
778 | if (const_b) { |
779 | /* Using CMP or CMN aliases. */ | |
780 | if (b >= 0) { | |
781 | tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b); | |
782 | } else { | |
783 | tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b); | |
784 | } | |
785 | } else { | |
786 | /* Using CMP alias SUBS wzr, Wn, Wm */ | |
787 | tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b); | |
788 | } | |
4a136e0a CF |
789 | } |
790 | ||
8587c30c | 791 | static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) |
4a136e0a | 792 | { |
8587c30c RH |
793 | ptrdiff_t offset = target - s->code_ptr; |
794 | assert(offset == sextract64(offset, 0, 26)); | |
81d8a5ee | 795 | tcg_out_insn(s, 3206, B, offset); |
4a136e0a CF |
796 | } |
797 | ||
798 | static inline void tcg_out_goto_noaddr(TCGContext *s) | |
799 | { | |
81d8a5ee RH |
800 | /* We take care here not to modify the branch target by reading from
801 | the buffer. This ensures that caches and memory are kept coherent during
802 | retranslation. Mask away possible garbage in the high bits for the | |
803 | first translation, while keeping the offset bits for retranslation. */ | |
804 | uint32_t old = tcg_in32(s); | |
805 | tcg_out_insn(s, 3206, B, old); | |
4a136e0a CF |
806 | } |
807 | ||
808 | static inline void tcg_out_goto_cond_noaddr(TCGContext *s, TCGCond c) | |
809 | { | |
81d8a5ee RH |
810 | /* See comments in tcg_out_goto_noaddr. */ |
811 | uint32_t old = tcg_in32(s) >> 5; | |
812 | tcg_out_insn(s, 3202, B_C, c, old); | |
4a136e0a CF |
813 | } |
814 | ||
4a136e0a CF |
815 | static inline void tcg_out_callr(TCGContext *s, TCGReg reg) |
816 | { | |
81d8a5ee | 817 | tcg_out_insn(s, 3207, BLR, reg); |
4a136e0a CF |
818 | } |
819 | ||
8587c30c | 820 | static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *target) |
4a136e0a | 821 | { |
8587c30c RH |
822 | ptrdiff_t offset = target - s->code_ptr; |
823 | if (offset == sextract64(offset, 0, 26)) { | |
81d8a5ee | 824 | tcg_out_insn(s, 3206, BL, offset); |
8587c30c RH |
825 | } else { |
826 | tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); | |
827 | tcg_out_callr(s, TCG_REG_TMP); | |
4a136e0a CF |
828 | } |
829 | } | |
830 | ||
4a136e0a CF |
831 | void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr) |
832 | { | |
8587c30c RH |
833 | tcg_insn_unit *code_ptr = (tcg_insn_unit *)jmp_addr; |
834 | tcg_insn_unit *target = (tcg_insn_unit *)addr; | |
4a136e0a | 835 | |
8587c30c | 836 | reloc_pc26(code_ptr, target); |
4a136e0a CF |
837 | flush_icache_range(jmp_addr, jmp_addr + 4); |
838 | } | |
839 | ||
840 | static inline void tcg_out_goto_label(TCGContext *s, int label_index) | |
841 | { | |
842 | TCGLabel *l = &s->labels[label_index]; | |
843 | ||
844 | if (!l->has_value) { | |
845 | tcg_out_reloc(s, s->code_ptr, R_AARCH64_JUMP26, label_index, 0); | |
846 | tcg_out_goto_noaddr(s); | |
847 | } else { | |
8587c30c | 848 | tcg_out_goto(s, l->u.value_ptr); |
4a136e0a CF |
849 | } |
850 | } | |
851 | ||
cae1f6f3 RH |
852 | static void tcg_out_brcond(TCGContext *s, TCGMemOp ext, TCGCond c, TCGArg a, |
853 | TCGArg b, bool b_const, int label) | |
4a136e0a | 854 | { |
cae1f6f3 RH |
855 | TCGLabel *l = &s->labels[label]; |
856 | intptr_t offset; | |
3d9e69a2 | 857 | bool need_cmp; |
cae1f6f3 | 858 | |
3d9e69a2 RH |
859 | if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) { |
860 | need_cmp = false; | |
861 | } else { | |
862 | need_cmp = true; | |
863 | tcg_out_cmp(s, ext, a, b, b_const); | |
864 | } | |
4a136e0a CF |
865 | |
866 | if (!l->has_value) { | |
cae1f6f3 RH |
867 | tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, label, 0); |
868 | offset = tcg_in32(s) >> 5; | |
4a136e0a | 869 | } else { |
8587c30c RH |
870 | offset = l->u.value_ptr - s->code_ptr; |
871 | assert(offset == sextract64(offset, 0, 19)); | |
4a136e0a | 872 | } |
cae1f6f3 | 873 | |
3d9e69a2 RH |
874 | if (need_cmp) { |
875 | tcg_out_insn(s, 3202, B_C, c, offset); | |
876 | } else if (c == TCG_COND_EQ) { | |
877 | tcg_out_insn(s, 3201, CBZ, ext, a, offset); | |
878 | } else { | |
879 | tcg_out_insn(s, 3201, CBNZ, ext, a, offset); | |
880 | } | |
4a136e0a CF |
881 | } |
882 | ||
edd8824c | 883 | static inline void tcg_out_rev64(TCGContext *s, TCGReg rd, TCGReg rn) |
9c4a059d | 884 | { |
edd8824c | 885 | tcg_out_insn(s, 3507, REV64, TCG_TYPE_I64, rd, rn); |
9c4a059d CF |
886 | } |
887 | ||
edd8824c | 888 | static inline void tcg_out_rev32(TCGContext *s, TCGReg rd, TCGReg rn) |
9c4a059d | 889 | { |
edd8824c RH |
890 | tcg_out_insn(s, 3507, REV32, TCG_TYPE_I32, rd, rn); |
891 | } | |
892 | ||
893 | static inline void tcg_out_rev16(TCGContext *s, TCGReg rd, TCGReg rn) | |
894 | { | |
895 | tcg_out_insn(s, 3507, REV16, TCG_TYPE_I32, rd, rn); | |
9c4a059d CF |
896 | } |
897 | ||
929f8b55 | 898 | static inline void tcg_out_sxt(TCGContext *s, TCGType ext, TCGMemOp s_bits, |
31f1275b CF |
899 | TCGReg rd, TCGReg rn) |
900 | { | |
b3c56df7 | 901 | /* Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 */ |
929f8b55 | 902 | int bits = (8 << s_bits) - 1; |
31f1275b CF |
903 | tcg_out_sbfm(s, ext, rd, rn, 0, bits); |
904 | } | |
905 | ||
929f8b55 | 906 | static inline void tcg_out_uxt(TCGContext *s, TCGMemOp s_bits, |
31f1275b CF |
907 | TCGReg rd, TCGReg rn) |
908 | { | |
b3c56df7 | 909 | /* Using ALIASes UXTB, UXTH of UBFM Wd, Wn, #0, #7|15 */ |
929f8b55 | 910 | int bits = (8 << s_bits) - 1; |
31f1275b CF |
911 | tcg_out_ubfm(s, 0, rd, rn, 0, bits); |
912 | } | |
913 | ||
90f1cd91 RH |
914 | static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, |
915 | TCGReg rn, int64_t aimm) | |
916 | { | |
917 | if (aimm >= 0) { | |
918 | tcg_out_insn(s, 3401, ADDI, ext, rd, rn, aimm); | |
919 | } else { | |
920 | tcg_out_insn(s, 3401, SUBI, ext, rd, rn, -aimm); | |
921 | } | |
922 | } | |
923 | ||
c6e929e7 RH |
924 | static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl, |
925 | TCGReg rh, TCGReg al, TCGReg ah, | |
926 | tcg_target_long bl, tcg_target_long bh, | |
927 | bool const_bl, bool const_bh, bool sub) | |
928 | { | |
929 | TCGReg orig_rl = rl; | |
930 | AArch64Insn insn; | |
931 | ||
932 | if (rl == ah || (!const_bh && rl == bh)) { | |
933 | rl = TCG_REG_TMP; | |
934 | } | |
935 | ||
936 | if (const_bl) { | |
937 | insn = I3401_ADDSI; | |
938 | if ((bl < 0) ^ sub) { | |
939 | insn = I3401_SUBSI; | |
940 | bl = -bl; | |
941 | } | |
942 | tcg_out_insn_3401(s, insn, ext, rl, al, bl); | |
943 | } else { | |
944 | tcg_out_insn_3502(s, sub ? I3502_SUBS : I3502_ADDS, ext, rl, al, bl); | |
945 | } | |
946 | ||
947 | insn = I3503_ADC; | |
948 | if (const_bh) { | |
949 | /* Note that the only two constants we support are 0 and -1, and | |
950 | that SBC = rn + ~rm + c, so adc -1 is sbc 0, and vice-versa. */ | |
951 | if ((bh != 0) ^ sub) { | |
952 | insn = I3503_SBC; | |
953 | } | |
954 | bh = TCG_REG_XZR; | |
955 | } else if (sub) { | |
956 | insn = I3503_SBC; | |
957 | } | |
958 | tcg_out_insn_3503(s, insn, ext, rh, ah, bh); | |
959 | ||
b825025f | 960 | tcg_out_mov(s, ext, orig_rl, rl); |
c6e929e7 RH |
961 | } |
962 | ||
4a136e0a | 963 | #ifdef CONFIG_SOFTMMU |
023261ef RH |
964 | /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, |
965 | * int mmu_idx, uintptr_t ra) | |
966 | */ | |
8587c30c | 967 | static void * const qemu_ld_helpers[16] = { |
de61d14f RH |
968 | [MO_UB] = helper_ret_ldub_mmu, |
969 | [MO_LEUW] = helper_le_lduw_mmu, | |
970 | [MO_LEUL] = helper_le_ldul_mmu, | |
971 | [MO_LEQ] = helper_le_ldq_mmu, | |
972 | [MO_BEUW] = helper_be_lduw_mmu, | |
973 | [MO_BEUL] = helper_be_ldul_mmu, | |
974 | [MO_BEQ] = helper_be_ldq_mmu, | |
4a136e0a CF |
975 | }; |
976 | ||
023261ef RH |
977 | /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, |
978 | * uintxx_t val, int mmu_idx, uintptr_t ra) | |
979 | */ | |
8587c30c | 980 | static void * const qemu_st_helpers[16] = { |
de61d14f RH |
981 | [MO_UB] = helper_ret_stb_mmu, |
982 | [MO_LEUW] = helper_le_stw_mmu, | |
983 | [MO_LEUL] = helper_le_stl_mmu, | |
984 | [MO_LEQ] = helper_le_stq_mmu, | |
985 | [MO_BEUW] = helper_be_stw_mmu, | |
986 | [MO_BEUL] = helper_be_stl_mmu, | |
987 | [MO_BEQ] = helper_be_stq_mmu, | |
4a136e0a CF |
988 | }; |
989 | ||
8587c30c | 990 | static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target) |
dc0c8aaf | 991 | { |
8587c30c RH |
992 | ptrdiff_t offset = tcg_pcrel_diff(s, target); |
993 | assert(offset == sextract64(offset, 0, 21)); | |
994 | tcg_out_insn(s, 3406, ADR, rd, offset); | |
dc0c8aaf RH |
995 | } |
996 | ||
c6d8ed24 JK |
997 | static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) |
998 | { | |
929f8b55 RH |
999 | TCGMemOp opc = lb->opc; |
1000 | TCGMemOp size = opc & MO_SIZE; | |
1001 | ||
8587c30c | 1002 | reloc_pc19(lb->label_ptr[0], s->code_ptr); |
017a86f7 | 1003 | |
b825025f RH |
1004 | tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); |
1005 | tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); | |
c6d8ed24 | 1006 | tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index); |
8587c30c RH |
1007 | tcg_out_adr(s, TCG_REG_X3, lb->raddr); |
1008 | tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); | |
929f8b55 | 1009 | if (opc & MO_SIGN) { |
9c53889b | 1010 | tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); |
c6d8ed24 | 1011 | } else { |
b825025f | 1012 | tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); |
c6d8ed24 JK |
1013 | } |
1014 | ||
8587c30c | 1015 | tcg_out_goto(s, lb->raddr); |
c6d8ed24 JK |
1016 | } |
1017 | ||
1018 | static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) | |
1019 | { | |
de61d14f RH |
1020 | TCGMemOp opc = lb->opc; |
1021 | TCGMemOp size = opc & MO_SIZE; | |
929f8b55 | 1022 | |
8587c30c | 1023 | reloc_pc19(lb->label_ptr[0], s->code_ptr); |
c6d8ed24 | 1024 | |
b825025f RH |
1025 | tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0); |
1026 | tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); | |
1027 | tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); | |
c6d8ed24 | 1028 | tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index); |
8587c30c RH |
1029 | tcg_out_adr(s, TCG_REG_X4, lb->raddr); |
1030 | tcg_out_call(s, qemu_st_helpers[opc]); | |
1031 | tcg_out_goto(s, lb->raddr); | |
c6d8ed24 JK |
1032 | } |
1033 | ||
ad5171db | 1034 | static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc, |
9c53889b | 1035 | TCGType ext, TCGReg data_reg, TCGReg addr_reg, |
8587c30c RH |
1036 | int mem_index, tcg_insn_unit *raddr, |
1037 | tcg_insn_unit *label_ptr) | |
c6d8ed24 | 1038 | { |
9ecefc84 | 1039 | TCGLabelQemuLdst *label = new_ldst_label(s); |
c6d8ed24 | 1040 | |
c6d8ed24 JK |
1041 | label->is_ld = is_ld; |
1042 | label->opc = opc; | |
9c53889b | 1043 | label->type = ext; |
c6d8ed24 JK |
1044 | label->datalo_reg = data_reg; |
1045 | label->addrlo_reg = addr_reg; | |
1046 | label->mem_index = mem_index; | |
1047 | label->raddr = raddr; | |
1048 | label->label_ptr[0] = label_ptr; | |
1049 | } | |
1050 | ||
1051 | /* Load and compare a TLB entry, emitting the conditional jump to the | |
1052 | slow path for the failure case, which will be patched later when finalizing | |
1053 | the slow path. Generated code returns the host addend in X1, | |
1054 | clobbers X0,X2,X3,TMP. */ | |
9e4177ad | 1055 | static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, TCGMemOp s_bits, |
8587c30c RH |
1056 | tcg_insn_unit **label_ptr, int mem_index, |
1057 | bool is_read) | |
c6d8ed24 JK |
1058 | { |
1059 | TCGReg base = TCG_AREG0; | |
1060 | int tlb_offset = is_read ? | |
1061 | offsetof(CPUArchState, tlb_table[mem_index][0].addr_read) | |
1062 | : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write); | |
6f472467 | 1063 | |
c6d8ed24 JK |
1064 | /* Extract the TLB index from the address into X0. |
1065 | X0<CPU_TLB_BITS:0> = | |
1066 | addr_reg<TARGET_PAGE_BITS+CPU_TLB_BITS:TARGET_PAGE_BITS> */ | |
6f472467 | 1067 | tcg_out_ubfm(s, TARGET_LONG_BITS == 64, TCG_REG_X0, addr_reg, |
c6d8ed24 | 1068 | TARGET_PAGE_BITS, TARGET_PAGE_BITS + CPU_TLB_BITS); |
6f472467 | 1069 | |
c6d8ed24 JK |
1070 | /* Store the page mask part of the address and the low s_bits into X3. |
1071 | Later this allows checking for equality and alignment at the same time. | |
1072 | X3 = addr_reg & (PAGE_MASK | ((1 << s_bits) - 1)) */ | |
e029f293 RH |
1073 | tcg_out_logicali(s, I3404_ANDI, TARGET_LONG_BITS == 64, TCG_REG_X3, |
1074 | addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1)); | |
6f472467 | 1075 | |
c6d8ed24 | 1076 | /* Add any "high bits" from the tlb offset to the env address into X2, |
096c46c0 | 1077 | to take advantage of the LSL12 form of the ADDI instruction. |
c6d8ed24 | 1078 | X2 = env + (tlb_offset & 0xfff000) */ |
6f472467 RH |
1079 | if (tlb_offset & 0xfff000) { |
1080 | tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_X2, base, | |
1081 | tlb_offset & 0xfff000); | |
1082 | base = TCG_REG_X2; | |
1083 | } | |
1084 | ||
c6d8ed24 JK |
1085 | /* Merge the tlb index contribution into X2. |
1086 | X2 = X2 + (X0 << CPU_TLB_ENTRY_BITS) */ | |
6f472467 | 1087 | tcg_out_insn(s, 3502S, ADD_LSL, TCG_TYPE_I64, TCG_REG_X2, base, |
50573c66 | 1088 | TCG_REG_X0, CPU_TLB_ENTRY_BITS); |
6f472467 | 1089 | |
c6d8ed24 JK |
1090 | /* Merge "low bits" from tlb offset, load the tlb comparator into X0. |
1091 | X0 = load [X2 + (tlb_offset & 0x000fff)] */ | |
3d4299f4 RH |
1092 | tcg_out_ldst(s, TARGET_LONG_BITS == 32 ? I3312_LDRW : I3312_LDRX, |
1093 | TCG_REG_X0, TCG_REG_X2, tlb_offset & 0xfff); | |
6f472467 | 1094 | |
c6d8ed24 JK |
1095 | /* Load the tlb addend. Do that early to avoid stalling. |
1096 | X1 = load [X2 + (tlb_offset & 0xfff) + offsetof(addend)] */ | |
3d4299f4 | 1097 | tcg_out_ldst(s, I3312_LDRX, TCG_REG_X1, TCG_REG_X2, |
c6d8ed24 JK |
1098 | (tlb_offset & 0xfff) + (offsetof(CPUTLBEntry, addend)) - |
1099 | (is_read ? offsetof(CPUTLBEntry, addr_read) | |
1100 | : offsetof(CPUTLBEntry, addr_write))); | |
6f472467 | 1101 | |
c6d8ed24 | 1102 | /* Perform the address comparison. */ |
90f1cd91 | 1103 | tcg_out_cmp(s, (TARGET_LONG_BITS == 64), TCG_REG_X0, TCG_REG_X3, 0); |
6f472467 | 1104 | |
c6d8ed24 | 1105 | /* If not equal, we jump to the slow path. */ |
6f472467 | 1106 | *label_ptr = s->code_ptr; |
c6d8ed24 JK |
1107 | tcg_out_goto_cond_noaddr(s, TCG_COND_NE); |
1108 | } | |
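/* Illustrative sketch of the fast-path sequence emitted above:
       ubfm  x0, addr, #TARGET_PAGE_BITS, #(TARGET_PAGE_BITS + CPU_TLB_BITS)
       and   x3, addr, #(TARGET_PAGE_MASK | ((1 << s_bits) - 1))
       add   x2, env, #hi(tlb_offset)                  ; only when nonzero
       add   x2, x2, x0, lsl #CPU_TLB_ENTRY_BITS
       ldr   x0, [x2, #lo(tlb_offset)]                 ; tlb comparator
       ldr   x1, [x2, #lo(tlb_offset) + addend]        ; host addend
       cmp   x0, x3
       b.ne  slow_path                                 ; patched later
   On a hit, x1 holds the host addend for the actual guest access.  */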
1109 | ||
1110 | #endif /* CONFIG_SOFTMMU */ | |
6a91c7c9 | 1111 | |
9c53889b | 1112 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp memop, TCGType ext, |
9e4177ad | 1113 | TCGReg data_r, TCGReg addr_r, TCGReg off_r) |
6a91c7c9 | 1114 | { |
9e4177ad RH |
1115 | const TCGMemOp bswap = memop & MO_BSWAP; |
1116 | ||
1117 | switch (memop & MO_SSIZE) { | |
1118 | case MO_UB: | |
3d4299f4 | 1119 | tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, off_r); |
6a91c7c9 | 1120 | break; |
9e4177ad | 1121 | case MO_SB: |
9c53889b RH |
1122 | tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW, |
1123 | data_r, addr_r, off_r); | |
6a91c7c9 | 1124 | break; |
9e4177ad | 1125 | case MO_UW: |
3d4299f4 | 1126 | tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); |
9e4177ad | 1127 | if (bswap) { |
edd8824c | 1128 | tcg_out_rev16(s, data_r, data_r); |
6a91c7c9 JK |
1129 | } |
1130 | break; | |
9e4177ad RH |
1131 | case MO_SW: |
1132 | if (bswap) { | |
3d4299f4 | 1133 | tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, off_r); |
edd8824c | 1134 | tcg_out_rev16(s, data_r, data_r); |
9c53889b | 1135 | tcg_out_sxt(s, ext, MO_16, data_r, data_r); |
6a91c7c9 | 1136 | } else { |
9c53889b RH |
1137 | tcg_out_ldst_r(s, ext ? I3312_LDRSHX : I3312_LDRSHW, |
1138 | data_r, addr_r, off_r); | |
6a91c7c9 JK |
1139 | } |
1140 | break; | |
9e4177ad | 1141 | case MO_UL: |
3d4299f4 | 1142 | tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); |
9e4177ad | 1143 | if (bswap) { |
edd8824c | 1144 | tcg_out_rev32(s, data_r, data_r); |
6a91c7c9 JK |
1145 | } |
1146 | break; | |
9e4177ad RH |
1147 | case MO_SL: |
1148 | if (bswap) { | |
3d4299f4 | 1149 | tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, off_r); |
edd8824c | 1150 | tcg_out_rev32(s, data_r, data_r); |
929f8b55 | 1151 | tcg_out_sxt(s, TCG_TYPE_I64, MO_32, data_r, data_r); |
6a91c7c9 | 1152 | } else { |
3d4299f4 | 1153 | tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, off_r); |
6a91c7c9 JK |
1154 | } |
1155 | break; | |
9e4177ad | 1156 | case MO_Q: |
3d4299f4 | 1157 | tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, off_r); |
9e4177ad | 1158 | if (bswap) { |
edd8824c | 1159 | tcg_out_rev64(s, data_r, data_r); |
6a91c7c9 JK |
1160 | } |
1161 | break; | |
1162 | default: | |
1163 | tcg_abort(); | |
1164 | } | |
1165 | } | |
1166 | ||
9e4177ad RH |
1167 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop, |
1168 | TCGReg data_r, TCGReg addr_r, TCGReg off_r) | |
6a91c7c9 | 1169 | { |
9e4177ad RH |
1170 | const TCGMemOp bswap = memop & MO_BSWAP; |
1171 | ||
1172 | switch (memop & MO_SIZE) { | |
1173 | case MO_8: | |
3d4299f4 | 1174 | tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, off_r); |
6a91c7c9 | 1175 | break; |
9e4177ad | 1176 | case MO_16: |
e81864a1 | 1177 | if (bswap && data_r != TCG_REG_XZR) { |
edd8824c | 1178 | tcg_out_rev16(s, TCG_REG_TMP, data_r); |
9e4177ad | 1179 | data_r = TCG_REG_TMP; |
6a91c7c9 | 1180 | } |
3d4299f4 | 1181 | tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, off_r); |
6a91c7c9 | 1182 | break; |
9e4177ad | 1183 | case MO_32: |
e81864a1 | 1184 | if (bswap && data_r != TCG_REG_XZR) { |
edd8824c | 1185 | tcg_out_rev32(s, TCG_REG_TMP, data_r); |
9e4177ad | 1186 | data_r = TCG_REG_TMP; |
6a91c7c9 | 1187 | } |
3d4299f4 | 1188 | tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, off_r); |
6a91c7c9 | 1189 | break; |
9e4177ad | 1190 | case MO_64: |
e81864a1 | 1191 | if (bswap && data_r != TCG_REG_XZR) { |
edd8824c | 1192 | tcg_out_rev64(s, TCG_REG_TMP, data_r); |
9e4177ad | 1193 | data_r = TCG_REG_TMP; |
6a91c7c9 | 1194 | } |
3d4299f4 | 1195 | tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, off_r); |
6a91c7c9 JK |
1196 | break; |
1197 | default: | |
1198 | tcg_abort(); | |
1199 | } | |
1200 | } | |
4a136e0a | 1201 | |
667b1cdd | 1202 | static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
9c53889b | 1203 | TCGMemOp memop, TCGType ext, int mem_index) |
4a136e0a | 1204 | { |
4a136e0a | 1205 | #ifdef CONFIG_SOFTMMU |
667b1cdd | 1206 | TCGMemOp s_bits = memop & MO_SIZE; |
8587c30c | 1207 | tcg_insn_unit *label_ptr; |
4a136e0a | 1208 | |
c6d8ed24 | 1209 | tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1); |
9c53889b RH |
1210 | tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1); |
1211 | add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg, | |
c6d8ed24 | 1212 | mem_index, s->code_ptr, label_ptr); |
4a136e0a | 1213 | #else /* !CONFIG_SOFTMMU */ |
9c53889b | 1214 | tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, |
6a91c7c9 JK |
1215 | GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); |
1216 | #endif /* CONFIG_SOFTMMU */ | |
4a136e0a CF |
1217 | } |
1218 | ||
667b1cdd RH |
1219 | static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, |
1220 | TCGMemOp memop, int mem_index) | |
4a136e0a | 1221 | { |
4a136e0a | 1222 | #ifdef CONFIG_SOFTMMU |
667b1cdd | 1223 | TCGMemOp s_bits = memop & MO_SIZE; |
8587c30c | 1224 | tcg_insn_unit *label_ptr; |
4a136e0a | 1225 | |
c6d8ed24 | 1226 | tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0); |
9e4177ad | 1227 | tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1); |
9c53889b | 1228 | add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg, |
c6d8ed24 | 1229 | mem_index, s->code_ptr, label_ptr); |
4a136e0a | 1230 | #else /* !CONFIG_SOFTMMU */ |
9e4177ad | 1231 | tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, |
6a91c7c9 JK |
1232 | GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR); |
1233 | #endif /* CONFIG_SOFTMMU */ | |
4a136e0a CF |
1234 | } |
1235 | ||
8587c30c | 1236 | static tcg_insn_unit *tb_ret_addr; |
4a136e0a | 1237 | |
4a136e0a | 1238 | static void tcg_out_op(TCGContext *s, TCGOpcode opc, |
8d8db193 RH |
1239 | const TCGArg args[TCG_MAX_OP_ARGS], |
1240 | const int const_args[TCG_MAX_OP_ARGS]) | |
4a136e0a | 1241 | { |
f0293414 RH |
1242 | /* 99% of the time, we can signal the use of extension registers |
1243 | by looking to see if the opcode handles 64-bit data. */ | |
1244 | TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; | |
4a136e0a | 1245 | |
8d8db193 RH |
1246 | /* Hoist the loads of the most common arguments. */ |
1247 | TCGArg a0 = args[0]; | |
1248 | TCGArg a1 = args[1]; | |
1249 | TCGArg a2 = args[2]; | |
1250 | int c2 = const_args[2]; | |
1251 | ||
04ce397b RH |
1252 | /* Some operands are defined with "rZ" constraint, a register or |
1253 | the zero register. These need not actually test args[I] == 0. */ | |
1254 | #define REG0(I) (const_args[I] ? TCG_REG_XZR : (TCGReg)args[I]) | |
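/* E.g. a store of constant zero reaches this point with const_args[0] set
   (the store data operand uses the "rZ" constraint), so REG0(0) yields XZR
   and INDEX_op_st_i32 below emits a plain "str wzr, [...]" without first
   materialising the zero in a register.  */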
1255 | ||
4a136e0a CF |
1256 | switch (opc) { |
1257 | case INDEX_op_exit_tb: | |
8d8db193 | 1258 | tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); |
8587c30c | 1259 | tcg_out_goto(s, tb_ret_addr); |
4a136e0a CF |
1260 | break; |
1261 | ||
1262 | case INDEX_op_goto_tb: | |
1263 | #ifndef USE_DIRECT_JUMP | |
1264 | #error "USE_DIRECT_JUMP required for aarch64" | |
1265 | #endif | |
1266 | assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */ | |
8587c30c | 1267 | s->tb_jmp_offset[a0] = tcg_current_code_size(s); |
4a136e0a CF |
1268 | /* actual branch destination will be patched by |
1269 | aarch64_tb_set_jmp_target later, beware retranslation. */ | |
1270 | tcg_out_goto_noaddr(s); | |
8587c30c | 1271 | s->tb_next_offset[a0] = tcg_current_code_size(s); |
4a136e0a CF |
1272 | break; |
1273 | ||
4a136e0a | 1274 | case INDEX_op_br: |
8d8db193 | 1275 | tcg_out_goto_label(s, a0); |
4a136e0a CF |
1276 | break; |
1277 | ||
4a136e0a | 1278 | case INDEX_op_ld8u_i32: |
4a136e0a | 1279 | case INDEX_op_ld8u_i64: |
3d4299f4 | 1280 | tcg_out_ldst(s, I3312_LDRB, a0, a1, a2); |
dc73dfd4 RH |
1281 | break; |
1282 | case INDEX_op_ld8s_i32: | |
3d4299f4 | 1283 | tcg_out_ldst(s, I3312_LDRSBW, a0, a1, a2); |
dc73dfd4 | 1284 | break; |
4a136e0a | 1285 | case INDEX_op_ld8s_i64: |
3d4299f4 | 1286 | tcg_out_ldst(s, I3312_LDRSBX, a0, a1, a2); |
dc73dfd4 RH |
1287 | break; |
1288 | case INDEX_op_ld16u_i32: | |
4a136e0a | 1289 | case INDEX_op_ld16u_i64: |
3d4299f4 | 1290 | tcg_out_ldst(s, I3312_LDRH, a0, a1, a2); |
dc73dfd4 RH |
1291 | break; |
1292 | case INDEX_op_ld16s_i32: | |
3d4299f4 | 1293 | tcg_out_ldst(s, I3312_LDRSHW, a0, a1, a2); |
dc73dfd4 | 1294 | break; |
4a136e0a | 1295 | case INDEX_op_ld16s_i64: |
3d4299f4 | 1296 | tcg_out_ldst(s, I3312_LDRSHX, a0, a1, a2); |
dc73dfd4 RH |
1297 | break; |
1298 | case INDEX_op_ld_i32: | |
4a136e0a | 1299 | case INDEX_op_ld32u_i64: |
3d4299f4 | 1300 | tcg_out_ldst(s, I3312_LDRW, a0, a1, a2); |
dc73dfd4 | 1301 | break; |
4a136e0a | 1302 | case INDEX_op_ld32s_i64: |
3d4299f4 | 1303 | tcg_out_ldst(s, I3312_LDRSWX, a0, a1, a2); |
e81864a1 | 1304 | break; |
dc73dfd4 | 1305 | case INDEX_op_ld_i64: |
3d4299f4 | 1306 | tcg_out_ldst(s, I3312_LDRX, a0, a1, a2); |
dc73dfd4 RH |
1307 | break; |
1308 | ||
4a136e0a CF |
1309 | case INDEX_op_st8_i32: |
1310 | case INDEX_op_st8_i64: | |
3d4299f4 | 1311 | tcg_out_ldst(s, I3312_STRB, REG0(0), a1, a2); |
dc73dfd4 | 1312 | break; |
4a136e0a CF |
1313 | case INDEX_op_st16_i32: |
1314 | case INDEX_op_st16_i64: | |
3d4299f4 | 1315 | tcg_out_ldst(s, I3312_STRH, REG0(0), a1, a2); |
dc73dfd4 RH |
1316 | break; |
1317 | case INDEX_op_st_i32: | |
4a136e0a | 1318 | case INDEX_op_st32_i64: |
3d4299f4 | 1319 | tcg_out_ldst(s, I3312_STRW, REG0(0), a1, a2); |
dc73dfd4 RH |
1320 | break; |
1321 | case INDEX_op_st_i64: | |
3d4299f4 | 1322 | tcg_out_ldst(s, I3312_STRX, REG0(0), a1, a2); |
4a136e0a CF |
1323 | break; |
1324 | ||
4a136e0a | 1325 | case INDEX_op_add_i32: |
90f1cd91 RH |
1326 | a2 = (int32_t)a2; |
1327 | /* FALLTHRU */ | |
1328 | case INDEX_op_add_i64: | |
1329 | if (c2) { | |
1330 | tcg_out_addsubi(s, ext, a0, a1, a2); | |
1331 | } else { | |
1332 | tcg_out_insn(s, 3502, ADD, ext, a0, a1, a2); | |
1333 | } | |
4a136e0a CF |
1334 | break; |
1335 | ||
4a136e0a | 1336 | case INDEX_op_sub_i32: |
90f1cd91 RH |
1337 | a2 = (int32_t)a2; |
1338 | /* FALLTHRU */ | |
1339 | case INDEX_op_sub_i64: | |
1340 | if (c2) { | |
1341 | tcg_out_addsubi(s, ext, a0, a1, -a2); | |
1342 | } else { | |
1343 | tcg_out_insn(s, 3502, SUB, ext, a0, a1, a2); | |
1344 | } | |
4a136e0a CF |
1345 | break; |
1346 | ||
14b155dd RH |
1347 | case INDEX_op_neg_i64: |
1348 | case INDEX_op_neg_i32: | |
1349 | tcg_out_insn(s, 3502, SUB, ext, a0, TCG_REG_XZR, a1); | |
1350 | break; | |
1351 | ||
4a136e0a | 1352 | case INDEX_op_and_i32: |
e029f293 RH |
1353 | a2 = (int32_t)a2; |
1354 | /* FALLTHRU */ | |
1355 | case INDEX_op_and_i64: | |
1356 | if (c2) { | |
1357 | tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, a2); | |
1358 | } else { | |
1359 | tcg_out_insn(s, 3510, AND, ext, a0, a1, a2); | |
1360 | } | |
4a136e0a CF |
1361 | break; |
1362 | ||
14b155dd RH |
1363 | case INDEX_op_andc_i32: |
1364 | a2 = (int32_t)a2; | |
1365 | /* FALLTHRU */ | |
1366 | case INDEX_op_andc_i64: | |
1367 | if (c2) { | |
1368 | tcg_out_logicali(s, I3404_ANDI, ext, a0, a1, ~a2); | |
1369 | } else { | |
1370 | tcg_out_insn(s, 3510, BIC, ext, a0, a1, a2); | |
1371 | } | |
1372 | break; | |
1373 | ||
4a136e0a | 1374 | case INDEX_op_or_i32: |
e029f293 RH |
1375 | a2 = (int32_t)a2; |
1376 | /* FALLTHRU */ | |
1377 | case INDEX_op_or_i64: | |
1378 | if (c2) { | |
1379 | tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, a2); | |
1380 | } else { | |
1381 | tcg_out_insn(s, 3510, ORR, ext, a0, a1, a2); | |
1382 | } | |
4a136e0a CF |
1383 | break; |
1384 | ||
14b155dd RH |
1385 | case INDEX_op_orc_i32: |
1386 | a2 = (int32_t)a2; | |
1387 | /* FALLTHRU */ | |
1388 | case INDEX_op_orc_i64: | |
1389 | if (c2) { | |
1390 | tcg_out_logicali(s, I3404_ORRI, ext, a0, a1, ~a2); | |
1391 | } else { | |
1392 | tcg_out_insn(s, 3510, ORN, ext, a0, a1, a2); | |
1393 | } | |
1394 | break; | |
1395 | ||
4a136e0a | 1396 | case INDEX_op_xor_i32: |
e029f293 RH |
1397 | a2 = (int32_t)a2; |
1398 | /* FALLTHRU */ | |
1399 | case INDEX_op_xor_i64: | |
1400 | if (c2) { | |
1401 | tcg_out_logicali(s, I3404_EORI, ext, a0, a1, a2); | |
1402 | } else { | |
1403 | tcg_out_insn(s, 3510, EOR, ext, a0, a1, a2); | |
1404 | } | |
4a136e0a CF |
1405 | break; |
1406 | ||
14b155dd RH |
1407 | case INDEX_op_eqv_i32: |
1408 | a2 = (int32_t)a2; | |
1409 | /* FALLTHRU */ | |
1410 | case INDEX_op_eqv_i64: | |
1411 | if (c2) { | |
1412 | tcg_out_logicali(s, I3404_EORI, ext, a0, a1, ~a2); | |
1413 | } else { | |
1414 | tcg_out_insn(s, 3510, EON, ext, a0, a1, a2); | |
1415 | } | |
1416 | break; | |
1417 | ||
1418 | case INDEX_op_not_i64: | |
1419 | case INDEX_op_not_i32: | |
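/* Bitwise NOT is ORN with the zero register: a0 = XZR | ~a1. */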
1420 | tcg_out_insn(s, 3510, ORN, ext, a0, TCG_REG_XZR, a1); | |
1421 | break; | |
1422 | ||
4a136e0a | 1423 | case INDEX_op_mul_i64: |
4a136e0a | 1424 | case INDEX_op_mul_i32: |
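/* Plain multiply is MADD with XZR as the addend: a0 = a1 * a2 + 0. */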
8678b71c RH |
1425 | tcg_out_insn(s, 3509, MADD, ext, a0, a1, a2, TCG_REG_XZR); |
1426 | break; | |
1427 | ||
1428 | case INDEX_op_div_i64: | |
1429 | case INDEX_op_div_i32: | |
1430 | tcg_out_insn(s, 3508, SDIV, ext, a0, a1, a2); | |
1431 | break; | |
1432 | case INDEX_op_divu_i64: | |
1433 | case INDEX_op_divu_i32: | |
1434 | tcg_out_insn(s, 3508, UDIV, ext, a0, a1, a2); | |
1435 | break; | |
1436 | ||
1437 | case INDEX_op_rem_i64: | |
1438 | case INDEX_op_rem_i32: | |
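/* There is no remainder instruction: divide first, then MSUB computes
   a0 = a1 - (a1 / a2) * a2.  The unsigned case below does the same with UDIV. */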
1439 | tcg_out_insn(s, 3508, SDIV, ext, TCG_REG_TMP, a1, a2); | |
1440 | tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); | |
1441 | break; | |
1442 | case INDEX_op_remu_i64: | |
1443 | case INDEX_op_remu_i32: | |
1444 | tcg_out_insn(s, 3508, UDIV, ext, TCG_REG_TMP, a1, a2); | |
1445 | tcg_out_insn(s, 3509, MSUB, ext, a0, TCG_REG_TMP, a2, a1); | |
4a136e0a CF |
1446 | break; |
1447 | ||
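/* Shift and rotate counts: constants go through the tcg_out_shl/shr/sar/rotr
   helpers (bitfield/extract encodings); variable counts use the
   LSLV/LSRV/ASRV/RORV register forms. */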
1448 | case INDEX_op_shl_i64: | |
4a136e0a | 1449 | case INDEX_op_shl_i32: |
df9351e3 | 1450 | if (c2) { |
8d8db193 | 1451 | tcg_out_shl(s, ext, a0, a1, a2); |
df9351e3 RH |
1452 | } else { |
1453 | tcg_out_insn(s, 3508, LSLV, ext, a0, a1, a2); | |
4a136e0a CF |
1454 | } |
1455 | break; | |
1456 | ||
1457 | case INDEX_op_shr_i64: | |
4a136e0a | 1458 | case INDEX_op_shr_i32: |
df9351e3 | 1459 | if (c2) { |
8d8db193 | 1460 | tcg_out_shr(s, ext, a0, a1, a2); |
df9351e3 RH |
1461 | } else { |
1462 | tcg_out_insn(s, 3508, LSRV, ext, a0, a1, a2); | |
4a136e0a CF |
1463 | } |
1464 | break; | |
1465 | ||
1466 | case INDEX_op_sar_i64: | |
4a136e0a | 1467 | case INDEX_op_sar_i32: |
df9351e3 | 1468 | if (c2) { |
8d8db193 | 1469 | tcg_out_sar(s, ext, a0, a1, a2); |
df9351e3 RH |
1470 | } else { |
1471 | tcg_out_insn(s, 3508, ASRV, ext, a0, a1, a2); | |
4a136e0a CF |
1472 | } |
1473 | break; | |
1474 | ||
1475 | case INDEX_op_rotr_i64: | |
4a136e0a | 1476 | case INDEX_op_rotr_i32: |
df9351e3 | 1477 | if (c2) { |
8d8db193 | 1478 | tcg_out_rotr(s, ext, a0, a1, a2); |
df9351e3 RH |
1479 | } else { |
1480 | tcg_out_insn(s, 3508, RORV, ext, a0, a1, a2); | |
4a136e0a CF |
1481 | } |
1482 | break; | |
1483 | ||
1484 | case INDEX_op_rotl_i64: | |
df9351e3 RH |
1485 | case INDEX_op_rotl_i32: |
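/* There is no rotate-left instruction; rotl(x, n) is implemented as
   rotr(x, -n), since RORV takes the count modulo the register width. */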
1486 | if (c2) { | |
8d8db193 | 1487 | tcg_out_rotl(s, ext, a0, a1, a2); |
4a136e0a | 1488 | } else { |
50573c66 | 1489 | tcg_out_insn(s, 3502, SUB, 0, TCG_REG_TMP, TCG_REG_XZR, a2); |
df9351e3 | 1490 | tcg_out_insn(s, 3508, RORV, ext, a0, a1, TCG_REG_TMP); |
4a136e0a CF |
1491 | } |
1492 | break; | |
1493 | ||
8d8db193 | 1494 | case INDEX_op_brcond_i32: |
90f1cd91 RH |
1495 | a1 = (int32_t)a1; |
1496 | /* FALLTHRU */ | |
1497 | case INDEX_op_brcond_i64: | |
cae1f6f3 | 1498 | tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], args[3]); |
4a136e0a CF |
1499 | break; |
1500 | ||
4a136e0a | 1501 | case INDEX_op_setcond_i32: |
90f1cd91 RH |
1502 | a2 = (int32_t)a2; |
1503 | /* FALLTHRU */ | |
1504 | case INDEX_op_setcond_i64: | |
1505 | tcg_out_cmp(s, ext, a1, a2, c2); | |
ed7a0aa8 RH |
1506 | /* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */ |
1507 | tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR, | |
1508 | TCG_REG_XZR, tcg_invert_cond(args[3])); | |
4a136e0a CF |
1509 | break; |
1510 | ||
04ce397b RH |
1511 | case INDEX_op_movcond_i32: |
1512 | a2 = (int32_t)a2; | |
1513 | /* FALLTHRU */ | |
1514 | case INDEX_op_movcond_i64: | |
1515 | tcg_out_cmp(s, ext, a1, a2, c2); | |
1516 | tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]); | |
1517 | break; | |
1518 | ||
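/* Guest memory accesses: the tcg_out_qemu_ld/st helpers emit the softmmu
   TLB lookup (or guest-base addressing for user-only builds) together with
   the access itself. */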
de61d14f RH |
1519 | case INDEX_op_qemu_ld_i32: |
1520 | case INDEX_op_qemu_ld_i64: | |
9c53889b | 1521 | tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]); |
4a136e0a | 1522 | break; |
de61d14f RH |
1523 | case INDEX_op_qemu_st_i32: |
1524 | case INDEX_op_qemu_st_i64: | |
e81864a1 | 1525 | tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]); |
4a136e0a CF |
1526 | break; |
1527 | ||
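/* Byte swaps map onto the REV family of instructions via the
   tcg_out_rev* helpers. */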
f0293414 | 1528 | case INDEX_op_bswap64_i64: |
edd8824c RH |
1529 | tcg_out_rev64(s, a0, a1); |
1530 | break; | |
1531 | case INDEX_op_bswap32_i64: | |
9c4a059d | 1532 | case INDEX_op_bswap32_i32: |
edd8824c | 1533 | tcg_out_rev32(s, a0, a1); |
9c4a059d CF |
1534 | break; |
1535 | case INDEX_op_bswap16_i64: | |
1536 | case INDEX_op_bswap16_i32: | |
edd8824c | 1537 | tcg_out_rev16(s, a0, a1); |
9c4a059d CF |
1538 | break; |
1539 | ||
31f1275b | 1540 | case INDEX_op_ext8s_i64: |
31f1275b | 1541 | case INDEX_op_ext8s_i32: |
929f8b55 | 1542 | tcg_out_sxt(s, ext, MO_8, a0, a1); |
31f1275b CF |
1543 | break; |
1544 | case INDEX_op_ext16s_i64: | |
31f1275b | 1545 | case INDEX_op_ext16s_i32: |
929f8b55 | 1546 | tcg_out_sxt(s, ext, MO_16, a0, a1); |
31f1275b CF |
1547 | break; |
1548 | case INDEX_op_ext32s_i64: | |
929f8b55 | 1549 | tcg_out_sxt(s, TCG_TYPE_I64, MO_32, a0, a1); |
31f1275b CF |
1550 | break; |
1551 | case INDEX_op_ext8u_i64: | |
1552 | case INDEX_op_ext8u_i32: | |
929f8b55 | 1553 | tcg_out_uxt(s, MO_8, a0, a1); |
31f1275b CF |
1554 | break; |
1555 | case INDEX_op_ext16u_i64: | |
1556 | case INDEX_op_ext16u_i32: | |
929f8b55 | 1557 | tcg_out_uxt(s, MO_16, a0, a1); |
31f1275b CF |
1558 | break; |
1559 | case INDEX_op_ext32u_i64: | |
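/* Writing a W register zero-extends into the upper 32 bits, so ext32u is
   just a 32-bit register move. */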
929f8b55 | 1560 | tcg_out_movr(s, TCG_TYPE_I32, a0, a1); |
31f1275b CF |
1561 | break; |
1562 | ||
b3c56df7 RH |
1563 | case INDEX_op_deposit_i64: |
1564 | case INDEX_op_deposit_i32: | |
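/* Bitfield insert.  REG0(2) folds a constant-zero operand to XZR,
   matching the "rZ" constraint in the table below. */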
1565 | tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); | |
1566 | break; | |
1567 | ||
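/* Double-word add/sub: tcg_out_addsub2 emits a flag-setting op for the low
   half and a carry-consuming op for the high half; the final argument
   selects subtraction. */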
c6e929e7 RH |
1568 | case INDEX_op_add2_i32: |
1569 | tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), | |
1570 | (int32_t)args[4], args[5], const_args[4], | |
1571 | const_args[5], false); | |
1572 | break; | |
1573 | case INDEX_op_add2_i64: | |
1574 | tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], | |
1575 | args[5], const_args[4], const_args[5], false); | |
1576 | break; | |
1577 | case INDEX_op_sub2_i32: | |
1578 | tcg_out_addsub2(s, TCG_TYPE_I32, a0, a1, REG0(2), REG0(3), | |
1579 | (int32_t)args[4], args[5], const_args[4], | |
1580 | const_args[5], true); | |
1581 | break; | |
1582 | case INDEX_op_sub2_i64: | |
1583 | tcg_out_addsub2(s, TCG_TYPE_I64, a0, a1, REG0(2), REG0(3), args[4], | |
1584 | args[5], const_args[4], const_args[5], true); | |
1585 | break; | |
1586 | ||
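/* UMULH/SMULH return the high 64 bits of the unsigned/signed 128-bit product. */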
1fcc9ddf RH |
1587 | case INDEX_op_muluh_i64: |
1588 | tcg_out_insn(s, 3508, UMULH, TCG_TYPE_I64, a0, a1, a2); | |
1589 | break; | |
1590 | case INDEX_op_mulsh_i64: | |
1591 | tcg_out_insn(s, 3508, SMULH, TCG_TYPE_I64, a0, a1, a2); | |
1592 | break; | |
1593 | ||
96d0ee7f | 1594 | case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ |
a51a6b6a | 1595 | case INDEX_op_mov_i64: |
96d0ee7f | 1596 | case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */ |
a51a6b6a | 1597 | case INDEX_op_movi_i64: |
96d0ee7f | 1598 | case INDEX_op_call: /* Always emitted via tcg_out_call. */ |
4a136e0a | 1599 | default: |
a51a6b6a | 1600 | tcg_abort(); |
4a136e0a | 1601 | } |
04ce397b RH |
1602 | |
1603 | #undef REG0 | |
4a136e0a CF |
1604 | } |
1605 | ||
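/* Constraint letters, as interpreted by the backend's constraint parser:
   "r" any register, "l" the subset usable for qemu_ld/st addressing,
   "Z" additionally allows the constant zero (folded to XZR/WZR),
   "A" constants encodable as arithmetic immediates, "L" logical immediates,
   "M" a further constant form used only by the double-word ops, and
   "0" ties an operand to output operand 0. */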
1606 | static const TCGTargetOpDef aarch64_op_defs[] = { | |
1607 | { INDEX_op_exit_tb, { } }, | |
1608 | { INDEX_op_goto_tb, { } }, | |
4a136e0a CF |
1609 | { INDEX_op_br, { } }, |
1610 | ||
4a136e0a CF |
1611 | { INDEX_op_ld8u_i32, { "r", "r" } }, |
1612 | { INDEX_op_ld8s_i32, { "r", "r" } }, | |
1613 | { INDEX_op_ld16u_i32, { "r", "r" } }, | |
1614 | { INDEX_op_ld16s_i32, { "r", "r" } }, | |
1615 | { INDEX_op_ld_i32, { "r", "r" } }, | |
1616 | { INDEX_op_ld8u_i64, { "r", "r" } }, | |
1617 | { INDEX_op_ld8s_i64, { "r", "r" } }, | |
1618 | { INDEX_op_ld16u_i64, { "r", "r" } }, | |
1619 | { INDEX_op_ld16s_i64, { "r", "r" } }, | |
1620 | { INDEX_op_ld32u_i64, { "r", "r" } }, | |
1621 | { INDEX_op_ld32s_i64, { "r", "r" } }, | |
1622 | { INDEX_op_ld_i64, { "r", "r" } }, | |
1623 | ||
e81864a1 RH |
1624 | { INDEX_op_st8_i32, { "rZ", "r" } }, |
1625 | { INDEX_op_st16_i32, { "rZ", "r" } }, | |
1626 | { INDEX_op_st_i32, { "rZ", "r" } }, | |
1627 | { INDEX_op_st8_i64, { "rZ", "r" } }, | |
1628 | { INDEX_op_st16_i64, { "rZ", "r" } }, | |
1629 | { INDEX_op_st32_i64, { "rZ", "r" } }, | |
1630 | { INDEX_op_st_i64, { "rZ", "r" } }, | |
4a136e0a | 1631 | |
170bf931 | 1632 | { INDEX_op_add_i32, { "r", "r", "rA" } }, |
90f1cd91 | 1633 | { INDEX_op_add_i64, { "r", "r", "rA" } }, |
170bf931 | 1634 | { INDEX_op_sub_i32, { "r", "r", "rA" } }, |
90f1cd91 | 1635 | { INDEX_op_sub_i64, { "r", "r", "rA" } }, |
4a136e0a CF |
1636 | { INDEX_op_mul_i32, { "r", "r", "r" } }, |
1637 | { INDEX_op_mul_i64, { "r", "r", "r" } }, | |
8678b71c RH |
1638 | { INDEX_op_div_i32, { "r", "r", "r" } }, |
1639 | { INDEX_op_div_i64, { "r", "r", "r" } }, | |
1640 | { INDEX_op_divu_i32, { "r", "r", "r" } }, | |
1641 | { INDEX_op_divu_i64, { "r", "r", "r" } }, | |
1642 | { INDEX_op_rem_i32, { "r", "r", "r" } }, | |
1643 | { INDEX_op_rem_i64, { "r", "r", "r" } }, | |
1644 | { INDEX_op_remu_i32, { "r", "r", "r" } }, | |
1645 | { INDEX_op_remu_i64, { "r", "r", "r" } }, | |
170bf931 | 1646 | { INDEX_op_and_i32, { "r", "r", "rL" } }, |
e029f293 | 1647 | { INDEX_op_and_i64, { "r", "r", "rL" } }, |
170bf931 | 1648 | { INDEX_op_or_i32, { "r", "r", "rL" } }, |
e029f293 | 1649 | { INDEX_op_or_i64, { "r", "r", "rL" } }, |
170bf931 | 1650 | { INDEX_op_xor_i32, { "r", "r", "rL" } }, |
e029f293 | 1651 | { INDEX_op_xor_i64, { "r", "r", "rL" } }, |
170bf931 | 1652 | { INDEX_op_andc_i32, { "r", "r", "rL" } }, |
14b155dd | 1653 | { INDEX_op_andc_i64, { "r", "r", "rL" } }, |
170bf931 | 1654 | { INDEX_op_orc_i32, { "r", "r", "rL" } }, |
14b155dd | 1655 | { INDEX_op_orc_i64, { "r", "r", "rL" } }, |
170bf931 | 1656 | { INDEX_op_eqv_i32, { "r", "r", "rL" } }, |
14b155dd RH |
1657 | { INDEX_op_eqv_i64, { "r", "r", "rL" } }, |
1658 | ||
1659 | { INDEX_op_neg_i32, { "r", "r" } }, | |
1660 | { INDEX_op_neg_i64, { "r", "r" } }, | |
1661 | { INDEX_op_not_i32, { "r", "r" } }, | |
1662 | { INDEX_op_not_i64, { "r", "r" } }, | |
4a136e0a CF |
1663 | |
1664 | { INDEX_op_shl_i32, { "r", "r", "ri" } }, | |
1665 | { INDEX_op_shr_i32, { "r", "r", "ri" } }, | |
1666 | { INDEX_op_sar_i32, { "r", "r", "ri" } }, | |
1667 | { INDEX_op_rotl_i32, { "r", "r", "ri" } }, | |
1668 | { INDEX_op_rotr_i32, { "r", "r", "ri" } }, | |
1669 | { INDEX_op_shl_i64, { "r", "r", "ri" } }, | |
1670 | { INDEX_op_shr_i64, { "r", "r", "ri" } }, | |
1671 | { INDEX_op_sar_i64, { "r", "r", "ri" } }, | |
1672 | { INDEX_op_rotl_i64, { "r", "r", "ri" } }, | |
1673 | { INDEX_op_rotr_i64, { "r", "r", "ri" } }, | |
1674 | ||
170bf931 | 1675 | { INDEX_op_brcond_i32, { "r", "rA" } }, |
90f1cd91 | 1676 | { INDEX_op_brcond_i64, { "r", "rA" } }, |
170bf931 | 1677 | { INDEX_op_setcond_i32, { "r", "r", "rA" } }, |
90f1cd91 | 1678 | { INDEX_op_setcond_i64, { "r", "r", "rA" } }, |
170bf931 | 1679 | { INDEX_op_movcond_i32, { "r", "r", "rA", "rZ", "rZ" } }, |
04ce397b | 1680 | { INDEX_op_movcond_i64, { "r", "r", "rA", "rZ", "rZ" } }, |
4a136e0a | 1681 | |
de61d14f RH |
1682 | { INDEX_op_qemu_ld_i32, { "r", "l" } }, |
1683 | { INDEX_op_qemu_ld_i64, { "r", "l" } }, | |
e81864a1 RH |
1684 | { INDEX_op_qemu_st_i32, { "lZ", "l" } }, |
1685 | { INDEX_op_qemu_st_i64, { "lZ", "l" } }, | |
9c4a059d CF |
1686 | |
1687 | { INDEX_op_bswap16_i32, { "r", "r" } }, | |
1688 | { INDEX_op_bswap32_i32, { "r", "r" } }, | |
1689 | { INDEX_op_bswap16_i64, { "r", "r" } }, | |
1690 | { INDEX_op_bswap32_i64, { "r", "r" } }, | |
1691 | { INDEX_op_bswap64_i64, { "r", "r" } }, | |
1692 | ||
31f1275b CF |
1693 | { INDEX_op_ext8s_i32, { "r", "r" } }, |
1694 | { INDEX_op_ext16s_i32, { "r", "r" } }, | |
1695 | { INDEX_op_ext8u_i32, { "r", "r" } }, | |
1696 | { INDEX_op_ext16u_i32, { "r", "r" } }, | |
1697 | ||
1698 | { INDEX_op_ext8s_i64, { "r", "r" } }, | |
1699 | { INDEX_op_ext16s_i64, { "r", "r" } }, | |
1700 | { INDEX_op_ext32s_i64, { "r", "r" } }, | |
1701 | { INDEX_op_ext8u_i64, { "r", "r" } }, | |
1702 | { INDEX_op_ext16u_i64, { "r", "r" } }, | |
1703 | { INDEX_op_ext32u_i64, { "r", "r" } }, | |
1704 | ||
b3c56df7 RH |
1705 | { INDEX_op_deposit_i32, { "r", "0", "rZ" } }, |
1706 | { INDEX_op_deposit_i64, { "r", "0", "rZ" } }, | |
1707 | ||
170bf931 | 1708 | { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, |
c6e929e7 | 1709 | { INDEX_op_add2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, |
170bf931 | 1710 | { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, |
c6e929e7 RH |
1711 | { INDEX_op_sub2_i64, { "r", "r", "rZ", "rZ", "rA", "rMZ" } }, |
1712 | ||
1fcc9ddf RH |
1713 | { INDEX_op_muluh_i64, { "r", "r", "r" } }, |
1714 | { INDEX_op_mulsh_i64, { "r", "r", "r" } }, | |
1715 | ||
4a136e0a CF |
1716 | { -1 }, |
1717 | }; | |
1718 | ||
1719 | static void tcg_target_init(TCGContext *s) | |
1720 | { | |
4a136e0a CF |
1721 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); |
1722 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff); | |
1723 | ||
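/* Everything a call may clobber: x0-x17 and x30 (lr) per AAPCS64, plus x18,
   which belongs to the platform and is also kept in reserved_regs below;
   x19-x28, fp and sp are callee-saved. */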
1724 | tcg_regset_set32(tcg_target_call_clobber_regs, 0, | |
1725 | (1 << TCG_REG_X0) | (1 << TCG_REG_X1) | | |
1726 | (1 << TCG_REG_X2) | (1 << TCG_REG_X3) | | |
1727 | (1 << TCG_REG_X4) | (1 << TCG_REG_X5) | | |
1728 | (1 << TCG_REG_X6) | (1 << TCG_REG_X7) | | |
1729 | (1 << TCG_REG_X8) | (1 << TCG_REG_X9) | | |
1730 | (1 << TCG_REG_X10) | (1 << TCG_REG_X11) | | |
1731 | (1 << TCG_REG_X12) | (1 << TCG_REG_X13) | | |
1732 | (1 << TCG_REG_X14) | (1 << TCG_REG_X15) | | |
1733 | (1 << TCG_REG_X16) | (1 << TCG_REG_X17) | | |
d82b78e4 | 1734 | (1 << TCG_REG_X18) | (1 << TCG_REG_X30)); |
4a136e0a CF |
1735 | |
1736 | tcg_regset_clear(s->reserved_regs); | |
1737 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); | |
1738 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); | |
1739 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); | |
1740 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_X18); /* platform register */ | |
1741 | ||
1742 | tcg_add_target_add_op_defs(aarch64_op_defs); | |
1743 | } | |
1744 | ||
38d195aa RH |
1745 | /* Saving pairs: (X19, X20) .. (X27, X28), (X29(fp), X30(lr)). */ |
1746 | #define PUSH_SIZE ((30 - 19 + 1) * 8) | |
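/* That is 12 registers (x19..x30) of 8 bytes each, 96 bytes in total. */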
1747 | ||
1748 | #define FRAME_SIZE \ | |
1749 | ((PUSH_SIZE \ | |
1750 | + TCG_STATIC_CALL_ARGS_SIZE \ | |
1751 | + CPU_TEMP_BUF_NLONGS * sizeof(long) \ | |
1752 | + TCG_TARGET_STACK_ALIGN - 1) \ | |
1753 | & ~(TCG_TARGET_STACK_ALIGN - 1)) | |
1754 | ||
1755 | /* We're expecting a 2 byte uleb128 encoded value. */ | |
1756 | QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); | |
1757 | ||
1758 | /* We're expecting to use a single ADDI insn. */ | |
1759 | QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); | |
1760 | ||
4a136e0a CF |
1761 | static void tcg_target_qemu_prologue(TCGContext *s) |
1762 | { | |
4a136e0a CF |
1763 | TCGReg r; |
1764 | ||
95f72aa9 RH |
1765 | /* Push (FP, LR) and allocate space for all saved registers. */ |
1766 | tcg_out_insn(s, 3314, STP, TCG_REG_FP, TCG_REG_LR, | |
38d195aa | 1767 | TCG_REG_SP, -PUSH_SIZE, 1, 1); |
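/* This emits the pre-indexed writeback form, i.e.
   "stp fp, lr, [sp, #-PUSH_SIZE]!"; the two trailing flags select that
   addressing mode in the 3314 encoder earlier in this file. */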
4a136e0a | 1768 | |
d82b78e4 | 1769 | /* Set up frame pointer for canonical unwinding. */ |
929f8b55 | 1770 | tcg_out_movr_sp(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); |
4a136e0a | 1771 | |
d82b78e4 | 1772 | /* Store callee-preserved regs x19..x28. */ |
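/* Offsets start at 16 because the (FP, LR) pair already occupies the
   first 16 bytes of the push area. */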
4a136e0a | 1773 | for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { |
95f72aa9 RH |
1774 | int ofs = (r - TCG_REG_X19 + 2) * 8; |
1775 | tcg_out_insn(s, 3314, STP, r, r + 1, TCG_REG_SP, ofs, 1, 0); | |
4a136e0a CF |
1776 | } |
1777 | ||
096c46c0 RH |
1778 | /* Make stack space for TCG locals. */ |
1779 | tcg_out_insn(s, 3401, SUBI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, | |
38d195aa | 1780 | FRAME_SIZE - PUSH_SIZE); |
096c46c0 | 1781 | |
95f72aa9 | 1782 | /* Inform TCG about how to find TCG locals with register, offset, size. */ |
4a136e0a CF |
1783 | tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, |
1784 | CPU_TEMP_BUF_NLONGS * sizeof(long)); | |
1785 | ||
6a91c7c9 JK |
1786 | #if defined(CONFIG_USE_GUEST_BASE) |
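/* If a guest base offset is in use, pin it in its dedicated register and
   reserve that register so the allocator never hands it out. */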
1787 | if (GUEST_BASE) { | |
1788 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, GUEST_BASE); | |
1789 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); | |
1790 | } | |
1791 | #endif | |
1792 | ||
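/* Entry point: called with the CPU env pointer in the first argument
   register and the address of the translated code in the second; stash
   env in AREG0 and jump into the generated code. */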
4a136e0a | 1793 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); |
81d8a5ee | 1794 | tcg_out_insn(s, 3207, BR, tcg_target_call_iarg_regs[1]); |
4a136e0a CF |
1795 | |
1796 | tb_ret_addr = s->code_ptr; | |
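/* Everything below is the common epilogue; code generated for exit_tb
   branches back here. */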
1797 | ||
096c46c0 RH |
1798 | /* Remove TCG locals stack space. */ |
1799 | tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, TCG_REG_SP, TCG_REG_SP, | |
38d195aa | 1800 | FRAME_SIZE - PUSH_SIZE); |
4a136e0a | 1801 | |
95f72aa9 | 1802 | /* Restore registers x19..x28. */ |
4a136e0a | 1803 | for (r = TCG_REG_X19; r <= TCG_REG_X27; r += 2) { |
95f72aa9 RH |
1804 | int ofs = (r - TCG_REG_X19 + 2) * 8; |
1805 | tcg_out_insn(s, 3314, LDP, r, r + 1, TCG_REG_SP, ofs, 1, 0); | |
4a136e0a CF |
1806 | } |
1807 | ||
95f72aa9 RH |
1808 | /* Pop (FP, LR), restore SP to previous frame. */ |
1809 | tcg_out_insn(s, 3314, LDP, TCG_REG_FP, TCG_REG_LR, | |
38d195aa | 1810 | TCG_REG_SP, PUSH_SIZE, 0, 1); |
81d8a5ee | 1811 | tcg_out_insn(s, 3207, RET, TCG_REG_LR); |
4a136e0a | 1812 | } |
38d195aa RH |
1813 | |
1814 | typedef struct { | |
3d9bddb3 | 1815 | DebugFrameHeader h; |
38d195aa RH |
1816 | uint8_t fde_def_cfa[4]; |
1817 | uint8_t fde_reg_ofs[24]; | |
1818 | } DebugFrame; | |
1819 | ||
1820 | #define ELF_HOST_MACHINE EM_AARCH64 | |
1821 | ||
3d9bddb3 RH |
1822 | static const DebugFrame debug_frame = { |
1823 | .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ | |
1824 | .h.cie.id = -1, | |
1825 | .h.cie.version = 1, | |
1826 | .h.cie.code_align = 1, | |
1827 | .h.cie.data_align = 0x78, /* sleb128 -8 */ | |
1828 | .h.cie.return_column = TCG_REG_LR, | |
38d195aa RH |
1829 | |
1830 | /* Total FDE size does not include the "len" member. */ | |
3d9bddb3 | 1831 | .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), |
38d195aa RH |
1832 | |
1833 | .fde_def_cfa = { | |
1834 | 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ | |
1835 | (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ | |
1836 | (FRAME_SIZE >> 7) | |
1837 | }, | |
1838 | .fde_reg_ofs = { | |
1839 | 0x80 + 28, 1, /* DW_CFA_offset, x28, -8 */ | |
1840 | 0x80 + 27, 2, /* DW_CFA_offset, x27, -16 */ | |
1841 | 0x80 + 26, 3, /* DW_CFA_offset, x26, -24 */ | |
1842 | 0x80 + 25, 4, /* DW_CFA_offset, x25, -32 */ | |
1843 | 0x80 + 24, 5, /* DW_CFA_offset, x24, -40 */ | |
1844 | 0x80 + 23, 6, /* DW_CFA_offset, x23, -48 */ | |
1845 | 0x80 + 22, 7, /* DW_CFA_offset, x22, -56 */ | |
1846 | 0x80 + 21, 8, /* DW_CFA_offset, x21, -64 */ | |
1847 | 0x80 + 20, 9, /* DW_CFA_offset, x20, -72 */ | |
1848 | 0x80 + 19, 10, /* DW_CFA_offset, x19, -80 */ | |
1849 | 0x80 + 30, 11, /* DW_CFA_offset, lr, -88 */ | |
1850 | 0x80 + 29, 12, /* DW_CFA_offset, fp, -96 */ | |
1851 | } | |
1852 | }; | |
1853 | ||
1854 | void tcg_register_jit(void *buf, size_t buf_size) | |
1855 | { | |
38d195aa RH |
1856 | tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); |
1857 | } |