]>
Commit | Line | Data |
---|---|---|
505e75c5 AF |
1 | /* |
2 | * Tiny Code Generator for QEMU | |
3 | * | |
4 | * Copyright (c) 2018 SiFive, Inc | |
5 | * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org> | |
6 | * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net> | |
7 | * Copyright (c) 2008 Fabrice Bellard | |
8 | * | |
9 | * Based on i386/tcg-target.c and mips/tcg-target.c | |
10 | * | |
11 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
12 | * of this software and associated documentation files (the "Software"), to deal | |
13 | * in the Software without restriction, including without limitation the rights | |
14 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
15 | * copies of the Software, and to permit persons to whom the Software is | |
16 | * furnished to do so, subject to the following conditions: | |
17 | * | |
18 | * The above copyright notice and this permission notice shall be included in | |
19 | * all copies or substantial portions of the Software. | |
20 | * | |
21 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
22 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
23 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
24 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
25 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
26 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
27 | * THE SOFTWARE. | |
28 | */ | |
29 | ||
139c1837 | 30 | #include "../tcg-pool.c.inc" |
505e75c5 AF |
31 | |
#ifdef CONFIG_DEBUG_TCG
/*
 * ABI mnemonics for the 32 RISC-V integer registers, indexed by TCGReg.
 * Used only for debug output.
 */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "sp",
    "gp",
    "tp",
    "t0",
    "t1",
    "t2",
    "s0",
    "s1",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "s9",
    "s10",
    "s11",
    "t3",
    "t4",
    "t5",
    "t6"
};
#endif
68 | ||
/*
 * Register allocation preference order.  Call-saved registers come first
 * so that values survive calls to helpers without spilling.
 */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers, allocated last as they are clobbered earliest. */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};
103 | ||
/* Integer function arguments, in RISC-V calling-convention order (a0-a7). */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};
114 | ||
/* Function return values: a0, plus a1 for a double-word result. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};
8ce23a13 AF |
119 | |
/*
 * Target-specific constant constraints, beyond the generic TCG_CT_CONST.
 * See tcg_target_const_match() below for the exact predicates.
 */
#define TCG_CT_CONST_ZERO 0x100   /* the constant zero */
#define TCG_CT_CONST_S12  0x200   /* signed 12-bit immediate */
#define TCG_CT_CONST_N12  0x400   /* negatable into a signed 12-bit imm */
#define TCG_CT_CONST_M12  0x800   /* magnitude <= 0xfff */
124 | ||
125 | static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) | |
126 | { | |
127 | if (TCG_TARGET_REG_BITS == 32) { | |
128 | return sextract32(val, pos, len); | |
129 | } else { | |
130 | return sextract64(val, pos, len); | |
131 | } | |
132 | } | |
133 | ||
/*
 * Parse one target-specific constraint letter from CT_STR into CT.
 * Returns the advanced string pointer, or NULL for an unknown letter.
 */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        /* Any integer register. */
        ct->regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        /* Avoid the registers used to pass arguments to the slow path. */
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* we can use a zero immediate as a zero register argument. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}
172 | ||
/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        /* Any constant is acceptable. */
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /* Fits a signed 12-bit immediate (ADDI et al). */
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    /* Negation fits a signed 12-bit immediate (sub via addi -val). */
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    /* Magnitude no larger than 0xfff (both val and -val encodable). */
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}
195 | ||
196 | /* | |
197 | * RISC-V Base ISA opcodes (IM) | |
198 | */ | |
199 | ||
/*
 * RISC-V base (RV32I/RV64I) and M-extension opcodes, pre-encoded with
 * their funct3/funct7 fields in place; register and immediate fields
 * are OR'ed in by the encode_* helpers below.
 */
typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    /* RV64-only 32-bit-operand ("W") forms. */
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;
54a9ce0f AF |
285 | |
286 | /* | |
287 | * RISC-V immediate and instruction encoders (excludes 16-bit RVC) | |
288 | */ | |
289 | ||
290 | /* Type-R */ | |
291 | ||
/* Encode an R-type instruction: opc | rd[11:7] | rs1[19:15] | rs2[24:20]. */
static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}
296 | ||
297 | /* Type-I */ | |
298 | ||
/* Place a 12-bit I-type immediate into instruction bits [31:20]. */
static int32_t encode_imm12(uint32_t imm)
{
    uint32_t field = imm & 0xfff;
    return field << 20;
}
303 | ||
/* Encode an I-type instruction (register-immediate, loads, JALR). */
static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}
308 | ||
309 | /* Type-S */ | |
310 | ||
/*
 * Scatter a 12-bit S-type (store) immediate into its two fields:
 * imm[11:5] -> bits [31:25], imm[4:0] -> bits [11:7].
 */
static int32_t encode_simm12(uint32_t imm)
{
    uint32_t hi = (imm & 0xfe0) << 20;
    uint32_t lo = (imm & 0x1f) << 7;

    return hi | lo;
}
320 | ||
/* Encode an S-type (store) instruction. */
static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}
325 | ||
326 | /* Type-SB */ | |
327 | ||
/*
 * Scatter a 13-bit SB-type (branch) immediate; imm[0] is implicitly zero.
 * imm[12] -> bit 31, imm[10:5] -> [30:25], imm[4:1] -> [11:8], imm[11] -> bit 7.
 */
static int32_t encode_sbimm12(uint32_t imm)
{
    return ((imm & 0x1000) << 19)
         | ((imm & 0x7e0) << 20)
         | ((imm & 0x1e) << 7)
         | ((imm & 0x800) >> 4);
}
339 | ||
/* Encode an SB-type (conditional branch) instruction. */
static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}
344 | ||
345 | /* Type-U */ | |
346 | ||
/* Keep only imm[31:12] for a U-type (LUI/AUIPC) immediate. */
static int32_t encode_uimm20(uint32_t imm)
{
    uint32_t field = imm & ~0xfffu;
    return field;
}
351 | ||
/* Encode a U-type instruction (LUI, AUIPC). */
static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}
356 | ||
357 | /* Type-UJ */ | |
358 | ||
/*
 * Scatter a 21-bit UJ-type (JAL) immediate; imm[0] is implicitly zero.
 * imm[10:1] -> [30:21], imm[11] -> bit 20, imm[19:12] -> [19:12],
 * imm[20] -> bit 31.
 */
static int32_t encode_ujimm20(uint32_t imm)
{
    uint32_t ret = 0;

    ret |= (imm & 0x0007fe) << 20;
    ret |= (imm & 0x000800) << 9;
    ret |= imm & 0x0ff000;
    ret |= (imm & 0x100000) << 11;

    return ret;
}
370 | ||
/* Encode a UJ-type (JAL) instruction. */
static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}
bedf14e3 AF |
375 | |
376 | /* | |
377 | * RISC-V instruction emitters | |
378 | */ | |
379 | ||
/* Emit an R-type (register-register) instruction. */
static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}
385 | ||
/* Emit an I-type (register-immediate / load / jalr) instruction. */
static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}
391 | ||
/* Emit an S-type store; rs1 is the base address, rs2 the data. */
static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}
397 | ||
/* Emit an SB-type conditional branch with pc-relative immediate IMM. */
static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}
403 | ||
/* Emit a U-type instruction (LUI/AUIPC) with upper-20-bit immediate. */
static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}
409 | ||
/* Emit a UJ-type jump (JAL) with pc-relative immediate IMM. */
static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}
415 | ||
416 | static void tcg_out_nop_fill(tcg_insn_unit *p, int count) | |
417 | { | |
418 | int i; | |
419 | for (i = 0; i < count; ++i) { | |
420 | p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); | |
421 | } | |
422 | } | |
dfa8e74f AF |
423 | |
424 | /* | |
425 | * Relocations | |
426 | */ | |
427 | ||
/*
 * Patch the SB-type branch at SRC_RW to reach TARGET.
 * Returns false if the pc-relative displacement does not fit in the
 * signed 13-bit (12-bit encoded) branch range.
 */
static bool reloc_sbimm12(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    /* Displacements are computed against the executable (rx) mapping. */
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 12)) {
        *src_rw |= encode_sbimm12(offset);
        return true;
    }

    return false;
}
441 | ||
/*
 * Patch the UJ-type jump (JAL) at SRC_RW to reach TARGET.
 * Returns false if the displacement exceeds the signed 21-bit
 * (20-bit encoded) jump range.
 */
static bool reloc_jimm20(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        *src_rw |= encode_ujimm20(offset);
        return true;
    }

    return false;
}
455 | ||
/*
 * Patch an AUIPC+ADDI/JALR/LD pair at SRC_RW with a +/-2GB pc-relative
 * displacement to TARGET, split into a high 20-bit part (AUIPC) and a
 * low signed 12-bit part (the following instruction).  LO is sign
 * extended, so HI compensates; hi + lo == offset iff the split fits.
 */
static bool reloc_call(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    if (offset == hi + lo) {
        src_rw[0] |= encode_uimm20(hi);
        src_rw[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}
471 | ||
/*
 * Apply relocation TYPE at CODE_PTR so it refers to absolute address
 * VALUE.  Returns false when the target is out of range for the
 * relocation kind.
 */
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_RISCV_BRANCH:
        return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}
6cd2eda3 AF |
487 | |
488 | /* | |
489 | * TCG intrinsics | |
490 | */ | |
491 | ||
/*
 * Emit a register-to-register move (via ADDI rd, rs, 0).
 * A move to self emits nothing.  Always succeeds.
 */
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
507 | ||
/*
 * Load constant VAL into RD, trying progressively more expensive
 * strategies: ADDI, LUI+ADDIW, pc-relative AUIPC+ADDI, shifted LUI,
 * and finally a constant-pool load.
 */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Fits a single signed 12-bit immediate. */
    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    /* Fits LUI (upper 20) plus a signed 12-bit add. */
    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    /* Within +/-2GB of the pc: AUIPC + ADDI, patched by reloc_call. */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (const tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section. */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle. */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool. */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}
27fd6414 AF |
574 | |
/* Zero-extend the low 8 bits of ARG into RET. */
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}
579 | ||
/* Zero-extend the low 16 bits of ARG into RET via shift pair. */
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}
585 | ||
/* Zero-extend the low 32 bits of ARG into RET (RV64 only path). */
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}
591 | ||
/* Sign-extend the low 8 bits of ARG into RET via shift pair. */
static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}
597 | ||
/* Sign-extend the low 16 bits of ARG into RET via shift pair. */
static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}
603 | ||
/* Sign-extend the low 32 bits of ARG into RET (ADDIW rd, rs, 0). */
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}
61535d49 AF |
608 | |
/*
 * Emit a load or store of DATA at ADDR+OFFSET.  When OFFSET does not
 * fit a signed 12-bit immediate, the high part is materialized into
 * TCG_REG_TMP2 (pc-relative via AUIPC when ADDR is the zero register
 * and the target is within +/-2GB, otherwise via tcg_out_movi + ADD).
 */
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}
649 | ||
/* Load a TYPE-sized value from ARG1+ARG2 into ARG (LW or LD). */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}
656 | ||
/* Store a TYPE-sized value from ARG to ARG1+ARG2 (SW or SD). */
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}
663 | ||
/*
 * Try to store constant VAL to BASE+OFS without a scratch register.
 * Only the constant zero can be stored directly (from the zero
 * register); anything else returns false for the generic fallback.
 */
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}
28ca738e AF |
673 | |
/*
 * Emit a double-word add or subtract: (rh:rl) = (ah:al) +/- (bh:bl).
 * CBL/CBH say whether BL/BH are constants.  The carry/borrow out of
 * the low half is computed with SLTU/SLTIU into TCG_REG_TMP0 and then
 * folded into the high half.
 */
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        /* High constant is zero and AH is not clobbered below: use AH
           directly as the high source. */
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            /* Borrow = (al < bl), computed before al is overwritten. */
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            /* Carry = (rl < bl) after the addition. */
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            /* al + al doubles: the carry out is al's original sign bit. */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_addi, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ? al : bl));
        }
        tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0);
    }
}
15840069 AF |
728 | |
/*
 * Map TCG comparison conditions to RISC-V branch opcodes.  RISC-V has
 * no BLE/BGT forms, so those conditions swap the operands and use the
 * mirrored opcode (swap == true).
 */
static const struct {
    RISCVInsn op;
    bool swap;
} tcg_brcond_to_riscv[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BLT,  false },
    [TCG_COND_GE] =  { OPC_BGE,  false },
    [TCG_COND_LE] =  { OPC_BGE,  true  },
    [TCG_COND_GT] =  { OPC_BLT,  true  },
    [TCG_COND_LTU] = { OPC_BLTU, false },
    [TCG_COND_GEU] = { OPC_BGEU, false },
    [TCG_COND_LEU] = { OPC_BGEU, true  },
    [TCG_COND_GTU] = { OPC_BLTU, true  }
};
744 | ||
/*
 * Emit a conditional branch to label L, taken when (ARG1 cond ARG2).
 * The branch displacement is left zero and fixed up later through an
 * R_RISCV_BRANCH relocation.
 */
static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    RISCVInsn op = tcg_brcond_to_riscv[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_riscv[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0);
    tcg_out_opc_branch(s, op, arg1, arg2, 0);
}
761 | ||
/*
 * Set RET to 1 if (ARG1 cond ARG2), else 0, using SLT/SLTU with the
 * result inverted (XORI 1) or operands swapped for conditions RISC-V
 * cannot express directly.
 */
static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, TCGReg arg2)
{
    switch (cond) {
    case TCG_COND_EQ:
        /* (arg1 - arg2) == 0  <=>  (arg1 - arg2) < 1 unsigned. */
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1);
        break;
    case TCG_COND_NE:
        /* (arg1 - arg2) != 0  <=>  0 < (arg1 - arg2) unsigned. */
        tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2);
        tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret);
        break;
    case TCG_COND_LT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        break;
    case TCG_COND_GE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LE:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GT:
        tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1);
        break;
    case TCG_COND_LTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        break;
    case TCG_COND_GEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_LEU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1);
        break;
    case TCG_COND_GTU:
        tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1);
        break;
    default:
        g_assert_not_reached();
        break;
    }
}
807 | ||
/* Double-word conditional branch (RV32 only); not yet implemented. */
static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah,
                            TCGReg bl, TCGReg bh, TCGLabel *l)
{
    /* todo */
    g_assert_not_reached();
}
814 | ||
/* Double-word setcond (RV32 only); not yet implemented. */
static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret,
                             TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh)
{
    /* todo */
    g_assert_not_reached();
}
821 | ||
/*
 * Emit a call (or tail jump, when TAIL) to ARG, selecting the shortest
 * usable sequence: JAL within +/-1MB, AUIPC+JALR within +/-2GB, or a
 * full 64-bit address materialization plus JALR.
 */
static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    /* A tail call links to the zero register so no return address is saved. */
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);
    int ret;

    tcg_debug_assert((offset & 1) == 0);
    if (offset == sextreg(offset, 0, 20)) {
        /* short jump: -2097150 to 2097152 */
        tcg_out_opc_jump(s, OPC_JAL, link, offset);
    } else if (TCG_TARGET_REG_BITS == 32 || offset == (int32_t)offset) {
        /* long jump: -2147483646 to 2147483648 */
        tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0);
        ret = reloc_call(s->code_ptr - 2, arg);
        tcg_debug_assert(ret == true);
    } else if (TCG_TARGET_REG_BITS == 64) {
        /* far jump: 64-bit */
        tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12);
        tcg_target_long base = (tcg_target_long)arg - imm;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base);
        tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm);
    } else {
        g_assert_not_reached();
    }
}
848 | ||
/* Emit a (non-tail) call to ARG. */
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
{
    tcg_out_call_int(s, arg, false);
}
efbea94c AF |
853 | |
/*
 * Emit a memory barrier.  The TCG_MO_* bits of A0 are translated into
 * the FENCE predecessor/successor bit fields (bits [27:24] = PI/PO/PR/PW,
 * bits [23:20] = SI/SO/SR/SW).
 */
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    tcg_insn_unit insn = OPC_FENCE;

    if (a0 & TCG_MO_LD_LD) {
        insn |= 0x02200000;   /* FENCE r,r */
    }
    if (a0 & TCG_MO_ST_LD) {
        insn |= 0x01200000;   /* FENCE w,r */
    }
    if (a0 & TCG_MO_LD_ST) {
        insn |= 0x02100000;   /* FENCE r,w */
    }
    if (a0 & TCG_MO_ST_ST) {
        insn |= 0x02200000;   /* NOTE(review): same bits as LD_LD (r,r);
                                 presumably should be FENCE w,w — confirm
                                 against upstream. */
    }
    tcg_out32(s, insn);
}
872 | ||
873 | /* | |
874 | * Load/store and TLB | |
875 | */ | |
876 | ||
877 | #if defined(CONFIG_SOFTMMU) | |
139c1837 | 878 | #include "../tcg-ldst.c.inc" |
efbea94c AF |
879 | |
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     TCGMemOpIdx oi, uintptr_t ra)
 *
 * Slow-path load helpers, indexed by MemOp (size | sign | endianness).
 */
static void * const qemu_ld_helpers[16] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_LESL] = helper_le_ldsl_mmu,
#endif
    [MO_LEQ]  = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
#if TCG_TARGET_REG_BITS == 64
    [MO_BESL] = helper_be_ldsl_mmu,
#endif
    [MO_BEQ]  = helper_be_ldq_mmu,
};
901 | ||
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 *
 * Slow-path store helpers, indexed by MemOp.
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};
915 | ||
41b70f22 RH |
916 | /* We don't support oversize guests */ |
917 | QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS); | |
918 | ||
269bd5d8 RH |
919 | /* We expect to use a 12-bit negative offset from ENV. */ |
920 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); | |
921 | QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); | |
922 | ||
/* Emit an unconditional jump to TARGET; must be within JAL range. */
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
    bool ok = reloc_jimm20(s->code_ptr - 1, target);
    tcg_debug_assert(ok);
}
929 | ||
efbea94c AF |
/*
 * Emit the softmmu TLB fast path for guest address ADDRL(/ADDRH).
 * On exit the translated host address is in TCG_REG_TMP0, and
 * label_ptr[0] records the BNE to be relocated to the slow path.
 * Clobbers TCG_REG_TMP0/TMP1/TMP2.
 */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    /* TMP0 = tlb index mask, TMP1 = tlb table base. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);

    /* TMP2 = &tlb_table[(addr >> PAGE_BITS) & mask]. */
    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - translate address using addend. */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}
983 | ||
984 | static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, | |
985 | TCGType ext, | |
986 | TCGReg datalo, TCGReg datahi, | |
987 | TCGReg addrlo, TCGReg addrhi, | |
988 | void *raddr, tcg_insn_unit **label_ptr) | |
989 | { | |
990 | TCGLabelQemuLdst *label = new_ldst_label(s); | |
991 | ||
992 | label->is_ld = is_ld; | |
993 | label->oi = oi; | |
994 | label->type = ext; | |
995 | label->datalo_reg = datalo; | |
996 | label->datahi_reg = datahi; | |
997 | label->addrlo_reg = addrlo; | |
998 | label->addrhi_reg = addrhi; | |
793f7381 RH |
999 | /* TODO: Cast goes away when all hosts converted */ |
1000 | label->raddr = (void *)tcg_splitwx_to_rx(raddr); | |
efbea94c AF |
1001 | label->label_ptr[0] = label_ptr[0]; |
1002 | } | |
1003 | ||
/*
 * Emit the out-of-line code for a load TLB miss: patch the miss branch
 * to land here, call the appropriate load helper, move the result into
 * the destination register, and jump back to the fast-path return
 * address.  Returns false if the miss branch could not be patched
 * (displacement out of range).
 */
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper: helper(env, addr, oi, retaddr) */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    /* NOTE(review): relies on TCG_TYPE_I32/TCG_TYPE_I64 being 0/1. */
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
    return true;
}
1035 | ||
/*
 * Emit the out-of-line code for a store TLB miss: patch the miss
 * branch to land here, narrow the data value as needed for the helper
 * ABI, call the appropriate store helper, and jump back to the
 * fast-path return address.  Returns false if the miss branch could
 * not be patched (displacement out of range).
 */
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper: helper(env, addr, data, oi, retaddr) */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg);
    /* Zero-extend sub-word data to match the helper argument type. */
    switch (s_bits) {
    case MO_8:
        tcg_out_ext8u(s, a2, a2);
        break;
    case MO_16:
        tcg_out_ext16u(s, a2, a2);
        break;
    default:
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);

    tcg_out_goto(s, l->raddr);
    return true;
}
1079 | #endif /* CONFIG_SOFTMMU */ | |
03a7d021 AF |
1080 | |
1081 | static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, | |
14776ab5 | 1082 | TCGReg base, MemOp opc, bool is_64) |
03a7d021 | 1083 | { |
14776ab5 | 1084 | const MemOp bswap = opc & MO_BSWAP; |
03a7d021 AF |
1085 | |
1086 | /* We don't yet handle byteswapping, assert */ | |
1087 | g_assert(!bswap); | |
1088 | ||
1089 | switch (opc & (MO_SSIZE)) { | |
1090 | case MO_UB: | |
1091 | tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); | |
1092 | break; | |
1093 | case MO_SB: | |
1094 | tcg_out_opc_imm(s, OPC_LB, lo, base, 0); | |
1095 | break; | |
1096 | case MO_UW: | |
1097 | tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); | |
1098 | break; | |
1099 | case MO_SW: | |
1100 | tcg_out_opc_imm(s, OPC_LH, lo, base, 0); | |
1101 | break; | |
1102 | case MO_UL: | |
1103 | if (TCG_TARGET_REG_BITS == 64 && is_64) { | |
1104 | tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); | |
1105 | break; | |
1106 | } | |
1107 | /* FALLTHRU */ | |
1108 | case MO_SL: | |
1109 | tcg_out_opc_imm(s, OPC_LW, lo, base, 0); | |
1110 | break; | |
1111 | case MO_Q: | |
1112 | /* Prefer to load from offset 0 first, but allow for overlap. */ | |
1113 | if (TCG_TARGET_REG_BITS == 64) { | |
1114 | tcg_out_opc_imm(s, OPC_LD, lo, base, 0); | |
1115 | } else if (lo != base) { | |
1116 | tcg_out_opc_imm(s, OPC_LW, lo, base, 0); | |
1117 | tcg_out_opc_imm(s, OPC_LW, hi, base, 4); | |
1118 | } else { | |
1119 | tcg_out_opc_imm(s, OPC_LW, hi, base, 4); | |
1120 | tcg_out_opc_imm(s, OPC_LW, lo, base, 0); | |
1121 | } | |
1122 | break; | |
1123 | default: | |
1124 | g_assert_not_reached(); | |
1125 | } | |
1126 | } | |
1127 | ||
/*
 * Emit a complete qemu_ld: decode the op args, perform the softmmu TLB
 * lookup (or guest_base addition for user-mode), and emit the load.
 * IS_64 indicates a 64-bit destination value.
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    /* Unpack args: data (1-2 regs), address (1-2 regs), memop index. */
    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    /* tcg_out_tlb_load leaves the translated address in base (TMP0). */
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        /* 32-bit guest address on a 64-bit host: zero-extend. */
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    /* base = guest_base + addr (plain copy when guest_base is zero). */
    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}
1167 | ||
1168 | static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, | |
14776ab5 | 1169 | TCGReg base, MemOp opc) |
03a7d021 | 1170 | { |
14776ab5 | 1171 | const MemOp bswap = opc & MO_BSWAP; |
03a7d021 AF |
1172 | |
1173 | /* We don't yet handle byteswapping, assert */ | |
1174 | g_assert(!bswap); | |
1175 | ||
1176 | switch (opc & (MO_SSIZE)) { | |
1177 | case MO_8: | |
1178 | tcg_out_opc_store(s, OPC_SB, base, lo, 0); | |
1179 | break; | |
1180 | case MO_16: | |
1181 | tcg_out_opc_store(s, OPC_SH, base, lo, 0); | |
1182 | break; | |
1183 | case MO_32: | |
1184 | tcg_out_opc_store(s, OPC_SW, base, lo, 0); | |
1185 | break; | |
1186 | case MO_64: | |
1187 | if (TCG_TARGET_REG_BITS == 64) { | |
1188 | tcg_out_opc_store(s, OPC_SD, base, lo, 0); | |
1189 | } else { | |
1190 | tcg_out_opc_store(s, OPC_SW, base, lo, 0); | |
1191 | tcg_out_opc_store(s, OPC_SW, base, hi, 4); | |
1192 | } | |
1193 | break; | |
1194 | default: | |
1195 | g_assert_not_reached(); | |
1196 | } | |
1197 | } | |
1198 | ||
/*
 * Emit a complete qemu_st: decode the op args, perform the softmmu TLB
 * lookup (or guest_base addition for user-mode), and emit the store.
 * IS_64 indicates a 64-bit data value.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    /* Unpack args: data (1-2 regs), address (1-2 regs), memop index. */
    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    /* tcg_out_tlb_load leaves the translated address in base (TMP0). */
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        /* 32-bit guest address on a 64-bit host: zero-extend. */
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    /* base = guest_base + addr (plain copy when guest_base is zero). */
    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}
bdf50381 | 1238 | |
/* Address of the epilogue's TB-return path; set in tcg_target_qemu_prologue. */
static const tcg_insn_unit *tb_ret_addr;
bdf50381 AF |
1240 | |
/*
 * Emit host instructions for a single TCG opcode.
 * ARGS and CONST_ARGS come from the register allocator: when
 * const_args[i] is non-zero, args[i] is an immediate rather than a
 * register.  Unknown opcodes abort.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg *args, const int *const_args)
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        if (a0 == 0) {
            tcg_out_call_int(s, tcg_code_gen_epilogue, true);
        } else {
            /* Return value goes in A0; jump to the common epilogue. */
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method: load the target from the jump table. */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        /* Record a relocation; the JAL immediate is patched later. */
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    /* 32-bit arithmetic uses the W-suffixed (sign-extending) forms. */
    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            /* There is no SUBI; add the negated immediate. */
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        /* not = xor with -1. */
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        /* neg = 0 - x. */
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    /* Immediate shift counts are masked to the operand width. */
    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;

    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;

    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;

    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;

    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        /* High half of a 64-bit value: arithmetic shift right by 32. */
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        g_assert_not_reached();
    }
}
1574 | ||
/*
 * Return the operand constraints for OP, or NULL if unsupported.
 * Constraint letters: "r" any register, "L" register usable in a
 * qemu_ld/st address/data slot, with modifiers "Z" (may be the zero
 * register), "I"/"N"/"M" (immediate classes) as defined by this
 * backend's constraint parser.
 */
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r
        = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r
        = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef rZ_r
        = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef rZ_rZ
        = { .args_ct_str = { "rZ", "rZ" } };
    static const TCGTargetOpDef rZ_rZ_rZ_rZ
        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_r_ri
        = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI
        = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_rZ_rN
        = { .args_ct_str = { "r", "rZ", "rN" } };
    static const TCGTargetOpDef r_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_L
        = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef r_r_L
        = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef r_L_L
        = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef LZ_L
        = { .args_ct_str = { "LZ", "L" } };
    static const TCGTargetOpDef LZ_L_L
        = { .args_ct_str = { "LZ", "L", "L" } };
    static const TCGTargetOpDef LZ_LZ_L
        = { .args_ct_str = { "LZ", "LZ", "L" } };
    static const TCGTargetOpDef LZ_LZ_L_L
        = { .args_ct_str = { "LZ", "LZ", "L", "L" } };
    static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rI;

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        /* "rN": a negatable immediate, since sub emits ADDI with -imm. */
        return &r_rZ_rN;

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return &r_rZ_rZ;

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &rZ_rZ;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return &r_r_rZ_rZ_rM_rM;

    case INDEX_op_brcond2_i32:
        return &rZ_rZ_rZ_rZ;

    case INDEX_op_setcond2_i32:
        return &r_rZ_rZ_rZ_rZ;

    /*
     * qemu_ld/st take one or two address registers depending on
     * whether the guest address fits a host register, and one or two
     * data registers for 64-bit values on a 32-bit host.
     */
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? &r_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
               : &r_r_L_L;
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? &LZ_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
               : &LZ_LZ_L_L;

    default:
        return NULL;
    }
}
92c041c5 AF |
1734 | |
/*
 * Registers saved/restored by the prologue/epilogue, in stack order.
 */
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};
1750 | ||
/*
 * Stack frame parameters.  Layout (from sp upward):
 *   [0, TCG_STATIC_CALL_ARGS_SIZE)  outgoing call arguments
 *   [.., +TEMP_SIZE)                TCG temporary spill buffer
 *   [SAVE_OFS, +SAVE_SIZE)          callee-saved registers
 * FRAME_SIZE rounds the total up to the stack alignment.
 */
#define REG_SIZE (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
1762 | ||
1763 | /* Generate global QEMU prologue and epilogue code */ | |
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue: allocate the frame and save callee-saved registers. */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    /* User-mode: keep guest_base in a reserved register. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code: arg0 = env, arg1 = entry point. */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    /* TODO: Cast goes away when all hosts converted */
    tcg_code_gen_epilogue = (void *)tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue: restore callee-saved registers and return. */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}
1801 | ||
7a5549f2 AF |
1802 | static void tcg_target_init(TCGContext *s) |
1803 | { | |
1804 | tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; | |
1805 | if (TCG_TARGET_REG_BITS == 64) { | |
1806 | tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; | |
1807 | } | |
1808 | ||
1809 | tcg_target_call_clobber_regs = -1u; | |
1810 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); | |
1811 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); | |
1812 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); | |
1813 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); | |
1814 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); | |
1815 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); | |
1816 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); | |
1817 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); | |
1818 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); | |
1819 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); | |
1820 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10); | |
1821 | tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11); | |
1822 | ||
1823 | s->reserved_regs = 0; | |
1824 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); | |
1825 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); | |
1826 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); | |
1827 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); | |
1828 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); | |
1829 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); | |
1830 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); | |
1831 | } | |
1832 | ||
92c041c5 AF |
/*
 * Layout of the debug-frame blob handed to tcg_register_jit_int() below.
 * It is the common CIE/FDE header followed by the raw DWARF call-frame
 * instruction bytes that describe this backend's prologue.
 */
typedef struct {
    DebugFrameHeader h;
    /* DW_CFA_def_cfa: CFA = sp + FRAME_SIZE (uleb128-encoded, 2 bytes). */
    uint8_t fde_def_cfa[4];
    /* One DW_CFA_offset (opcode+reg, uleb128 offset) pair per saved reg. */
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;
1838 | ||
1839 | #define ELF_HOST_MACHINE EM_RISCV | |
1840 | ||
/*
 * Hand-assembled DWARF unwind description for code generated by this
 * backend.  The CFA is sp + FRAME_SIZE, and the callee-saved registers
 * spilled by the prologue are recorded at their data_align-scaled slot
 * offsets (each DW_CFA_offset operand is multiplied by data_align, i.e.
 * -(reg size), to yield the negative offset noted in the comments).
 */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* 0x80 is DW_CFA_offset; the low bits are the DWARF reg number. */
        0x80 + 9,  12,                  /* DW_CFA_offset, s1,  -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2,  -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3,  -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4,  -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5,  -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6,  -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7,  -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8,  -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9,  -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1 , 1,                   /* DW_CFA_offset, ra,  -8 */
    }
};
1872 | ||
755bf9e5 | 1873 | void tcg_register_jit(const void *buf, size_t buf_size) |
92c041c5 AF |
1874 | { |
1875 | tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); | |
1876 | } |