/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* TODO list:
 * - See TODO comments in code.
 */

/* Marker for missing code. */
#define TODO() \
    do { \
        fprintf(stderr, "TODO %s:%u: %s()\n", \
                __FILE__, __LINE__, __func__); \
        tcg_abort(); \
    } while (0)

/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - n)) >> (31 - n + m)) << m)
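/* For example, BITS(7, 4) selects bits 7..4 and evaluates to 0x000000f0. */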

/* Macros used in tcg_target_op_defs. */
#define R "r"
#define RI "ri"
#if TCG_TARGET_REG_BITS == 32
# define R64 "r", "r"
#else
# define R64 "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
# define L "L", "L"
# define S "S", "S"
#else
# define L "L"
# define S "S"
#endif

/* TODO: documentation. */
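/* Constraint sets, one entry per opcode implemented by this backend:
   output operands first, then inputs, using the macros defined above. */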
static const TCGTargetOpDef tcg_target_op_defs[] = {
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_br, { NULL } },

    { INDEX_op_ld8u_i32, { R, R } },
    { INDEX_op_ld8s_i32, { R, R } },
    { INDEX_op_ld16u_i32, { R, R } },
    { INDEX_op_ld16s_i32, { R, R } },
    { INDEX_op_ld_i32, { R, R } },
    { INDEX_op_st8_i32, { R, R } },
    { INDEX_op_st16_i32, { R, R } },
    { INDEX_op_st_i32, { R, R } },

    { INDEX_op_add_i32, { R, RI, RI } },
    { INDEX_op_sub_i32, { R, RI, RI } },
    { INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { R, R, R } },
    { INDEX_op_divu_i32, { R, R, R } },
    { INDEX_op_rem_i32, { R, R, R } },
    { INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
       If both operands are constants, we can optimize. */
    { INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
    { INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
    { INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
    { INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
    { INDEX_op_nor_i32, { R, RI, RI } },
#endif
    { INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
    { INDEX_op_orc_i32, { R, RI, RI } },
#endif
    { INDEX_op_xor_i32, { R, RI, RI } },
    { INDEX_op_shl_i32, { R, RI, RI } },
    { INDEX_op_shr_i32, { R, RI, RI } },
    { INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
    { INDEX_op_rotl_i32, { R, RI, RI } },
    { INDEX_op_rotr_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i32
    { INDEX_op_deposit_i32, { R, "0", R } },
#endif

    { INDEX_op_brcond_i32, { R, RI } },

    { INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */

#if TCG_TARGET_REG_BITS == 32
    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
    { INDEX_op_mulu2_i32, { R, R, R, R } },
    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
#endif

#if TCG_TARGET_HAS_not_i32
    { INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
    { INDEX_op_neg_i32, { R, R } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_ld8u_i64, { R, R } },
    { INDEX_op_ld8s_i64, { R, R } },
    { INDEX_op_ld16u_i64, { R, R } },
    { INDEX_op_ld16s_i64, { R, R } },
    { INDEX_op_ld32u_i64, { R, R } },
    { INDEX_op_ld32s_i64, { R, R } },
    { INDEX_op_ld_i64, { R, R } },

    { INDEX_op_st8_i64, { R, R } },
    { INDEX_op_st16_i64, { R, R } },
    { INDEX_op_st32_i64, { R, R } },
    { INDEX_op_st_i64, { R, R } },

    { INDEX_op_add_i64, { R, RI, RI } },
    { INDEX_op_sub_i64, { R, RI, RI } },
    { INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
    { INDEX_op_div_i64, { R, R, R } },
    { INDEX_op_divu_i64, { R, R, R } },
    { INDEX_op_rem_i64, { R, R, R } },
    { INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
    { INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
    { INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
    { INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
    { INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
    { INDEX_op_nor_i64, { R, RI, RI } },
#endif
    { INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
    { INDEX_op_orc_i64, { R, RI, RI } },
#endif
    { INDEX_op_xor_i64, { R, RI, RI } },
    { INDEX_op_shl_i64, { R, RI, RI } },
    { INDEX_op_shr_i64, { R, RI, RI } },
    { INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
    { INDEX_op_rotl_i64, { R, RI, RI } },
    { INDEX_op_rotr_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i64
    { INDEX_op_deposit_i64, { R, "0", R } },
#endif
    { INDEX_op_brcond_i64, { R, RI } },

#if TCG_TARGET_HAS_ext8s_i64
    { INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
    { INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
    { INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
    { INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
    { INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
    { INDEX_op_ext32u_i64, { R, R } },
#endif
    { INDEX_op_ext_i32_i64, { R, R } },
    { INDEX_op_extu_i32_i64, { R, R } },
#if TCG_TARGET_HAS_bswap16_i64
    { INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
    { INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
    { INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
    { INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
    { INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

    { INDEX_op_qemu_ld_i32, { R, L } },
    { INDEX_op_qemu_ld_i64, { R64, L } },

    { INDEX_op_qemu_st_i32, { R, S } },
    { INDEX_op_qemu_st_i64, { R64, S } },

#if TCG_TARGET_HAS_ext8s_i32
    { INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
    { INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
    { INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
    { INDEX_op_ext16u_i32, { R, R } },
#endif

#if TCG_TARGET_HAS_bswap16_i32
    { INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
    { INDEX_op_bswap32_i32, { R, R } },
#endif

    { INDEX_op_mb, { } },
    { -1 },
};

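/* Return the constraint definition for OP, or NULL if this backend does not
   implement the opcode. */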
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    int i, n = ARRAY_SIZE(tcg_target_op_defs);

    for (i = 0; i < n; ++i) {
        if (tcg_target_op_defs[i].op == op) {
            return &tcg_target_op_defs[i];
        }
    }
    return NULL;
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
#endif
};

#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
#if TCG_TARGET_REG_BITS == 32
    /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
#else
# error Too few input registers available
#endif
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
#if TCG_TARGET_NB_REGS >= 16
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
#if TCG_TARGET_NB_REGS >= 32
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
#endif
#endif
};
#endif

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    /* tcg_out_reloc always uses the same type, addend. */
    tcg_debug_assert(type == sizeof(tcg_target_long));
    tcg_debug_assert(addend == 0);
    tcg_debug_assert(value != 0);
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_patch32(code_ptr, value);
    } else {
        tcg_patch64(code_ptr, value);
    }
    return true;
}

/* Parse target specific constraints. */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
    case 'L':                   /* qemu_ld constraint */
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = BIT(TCG_TARGET_NB_REGS) - 1;
        break;
    default:
        return NULL;
    }
    return ct_str;
}

#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter. */
void tci_disas(uint8_t opc)
{
    const TCGOpDef *def = &tcg_op_defs[opc];
    fprintf(stderr, "TCG %s %u, %u, %u\n",
            def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
}
#endif

/* Write value (native size). */
static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
{
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out32(s, v);
    } else {
        tcg_out64(s, v);
    }
}

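/* Each TCI instruction starts with a one-byte opcode followed by a one-byte
   total length; tcg_out_op_t() writes a zero placeholder for the length,
   which the emitters below patch afterwards via old_code_ptr[1]. */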
/* Write opcode. */
static void tcg_out_op_t(TCGContext *s, TCGOpcode op)
{
    tcg_out8(s, op);
    tcg_out8(s, 0);
}

/* Write register. */
static void tcg_out_r(TCGContext *s, TCGArg t0)
{
    tcg_debug_assert(t0 < TCG_TARGET_NB_REGS);
    tcg_out8(s, t0);
}

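/* A register-or-constant operand is encoded either as a single register byte
   or as a TCG_CONST marker byte followed by the immediate value; the helpers
   below emit each form. */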
/* Write register or constant (native size). */
static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out_i(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

/* Write register or constant (32 bit). */
static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out32(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

#if TCG_TARGET_REG_BITS == 64
/* Write register or constant (64 bit). */
static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        tcg_debug_assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out64(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}
#endif

/* Write label. */
static void tci_out_label(TCGContext *s, TCGLabel *label)
{
    if (label->has_value) {
        tcg_out_i(s, label->u.value);
        tcg_debug_assert(label->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), label, 0);
        s->code_ptr += sizeof(tcg_target_ulong);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_ld_i32);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_ld_i64);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_debug_assert(arg2 == (int32_t)arg2);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    tcg_debug_assert(ret != arg);
#if TCG_TARGET_REG_BITS == 32
    tcg_out_op_t(s, INDEX_op_mov_i32);
#else
    tcg_out_op_t(s, INDEX_op_mov_i64);
#endif
    tcg_out_r(s, ret);
    tcg_out_r(s, arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
    return true;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg t0, tcg_target_long arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    uint32_t arg32 = arg;
    if (type == TCG_TYPE_I32 || arg == arg32) {
        tcg_out_op_t(s, INDEX_op_movi_i32);
        tcg_out_r(s, t0);
        tcg_out32(s, arg32);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_movi_i64);
        tcg_out_r(s, t0);
        tcg_out64(s, arg);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static inline void tcg_out_call(TCGContext *s, tcg_insn_unit *arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    tcg_out_op_t(s, INDEX_op_call);
    tcg_out_ri(s, 1, (uintptr_t)arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;

    tcg_out_op_t(s, opc);

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump method. */
            /* Align for atomic patching and thread safety */
            s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        set_jmp_reset_offset(s, args[0]);
        break;
    case INDEX_op_br:
        tci_out_label(s, arg_label(args[0]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_debug_assert(args[2] == (int32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i32:  /* Optional (TCG_TARGET_HAS_deposit_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_debug_assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        tcg_debug_assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i64:  /* Optional (TCG_TARGET_HAS_deposit_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_debug_assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        tcg_debug_assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, arg_label(args[3]));
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);   /* condition */
        tci_out_label(s, arg_label(args[5]));
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, arg_label(args[3]));
        break;
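    /* For the qemu_ld/st opcodes the operands are the data register(s), then
       the guest address register(s), and finally the memory operation index
       constant; 64-bit values and guest addresses may each need two registers
       on 32-bit hosts, hence the conditional emits below. */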
    case INDEX_op_qemu_ld_i32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_r(s, *args++);
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_r(s, *args++);
        }
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_r(s, *args++);
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_r(s, *args++);
        }
        tcg_out_r(s, *args++);
        if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
            tcg_out_r(s, *args++);
        }
        tcg_out_i(s, *args++);
        break;
    case INDEX_op_mb:
        break;
    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       intptr_t arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_st_i32);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        tcg_debug_assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_st_i64);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    /* No need to return 0 or 1, 0 or != 0 is good enough. */
    return arg_ct->ct & TCG_CT_CONST;
}

static void tcg_target_init(TCGContext *s)
{
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
    const char *envval = getenv("DEBUG_TCG");
    if (envval) {
        qemu_set_log(strtol(envval, NULL, 0));
    }
#endif

    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* TODO: Which registers should be set here? */
    tcg_target_call_clobber_regs = BIT(TCG_TARGET_NB_REGS) - 1;

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* We use negative offsets from "sp" so that we can distinguish
       stores that might pretend to be call arguments. */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  -CPU_TEMP_BUF_NLONGS * sizeof(long),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

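/* The TCG interpreter in tci.c executes the emitted bytecode directly, so no
   host-specific prologue or epilogue code is needed for this backend. */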
/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}