]> git.proxmox.com Git - qemu.git/blame - tcg/tci/tcg-target.c
Merge remote-tracking branch 'afaerber/tags/qom-cpu-for-anthony' into staging
[qemu.git] / tcg / tci / tcg-target.c
CommitLineData
7316329a
SW
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2009, 2011 Stefan Weil
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25/* TODO list:
26 * - See TODO comments in code.
27 */
28
/* Marker for missing code. */
#define TODO() \
    do { \
        fprintf(stderr, "TODO %s:%u: %s()\n", \
                __FILE__, __LINE__, __func__); \
        tcg_abort(); \
    } while (0)

/* Bitfield n...m (in 32 bit value).
   Arguments are parenthesized so that expression arguments such as
   BITS(a + b, m) expand correctly (the unparenthesized form miscomputed
   the shift counts and could shift by >= 32, which is undefined). */
#define BITS(n, m) (((0xffffffffU << (31 - (n))) >> (31 - (n) + (m))) << (m))
39
7316329a
SW
/* Macros used in tcg_target_op_defs.
   R: any register; RI: register or immediate constant. */
#define R "r"
#define RI "ri"
#if TCG_TARGET_REG_BITS == 32
/* A 64 bit operand needs a register pair on a 32 bit host. */
# define R64 "r", "r"
#else
# define R64 "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
/* A guest address wider than a host register needs two registers
   (L: qemu_ld address, S: qemu_st address). */
# define L "L", "L"
# define S "S", "S"
#else
# define L "L"
# define S "S"
#endif
55
/* Operand-constraint table for the bytecode backend: one entry per
   supported opcode, giving the constraint string of each operand
   (see the R/RI/R64/L/S macros above).  Terminated by { -1 }. */
static const TCGTargetOpDef tcg_target_op_defs[] = {
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_call, { RI } },
    { INDEX_op_br, { NULL } },

    { INDEX_op_mov_i32, { R, R } },
    { INDEX_op_movi_i32, { R } },

    { INDEX_op_ld8u_i32, { R, R } },
    { INDEX_op_ld8s_i32, { R, R } },
    { INDEX_op_ld16u_i32, { R, R } },
    { INDEX_op_ld16s_i32, { R, R } },
    { INDEX_op_ld_i32, { R, R } },
    { INDEX_op_st8_i32, { R, R } },
    { INDEX_op_st16_i32, { R, R } },
    { INDEX_op_st_i32, { R, R } },

    { INDEX_op_add_i32, { R, RI, RI } },
    { INDEX_op_sub_i32, { R, RI, RI } },
    { INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { R, R, R } },
    { INDEX_op_divu_i32, { R, R, R } },
    { INDEX_op_rem_i32, { R, R, R } },
    { INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
       If both operands are constants, we can optimize. */
    { INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
    { INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
    { INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
    { INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
    { INDEX_op_nor_i32, { R, RI, RI } },
#endif
    { INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
    { INDEX_op_orc_i32, { R, RI, RI } },
#endif
    { INDEX_op_xor_i32, { R, RI, RI } },
    { INDEX_op_shl_i32, { R, RI, RI } },
    { INDEX_op_shr_i32, { R, RI, RI } },
    { INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
    { INDEX_op_rotl_i32, { R, RI, RI } },
    { INDEX_op_rotr_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i32
    { INDEX_op_deposit_i32, { R, "0", R } },
#endif

    { INDEX_op_brcond_i32, { R, RI } },

    { INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */

#if TCG_TARGET_REG_BITS == 32
    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
    { INDEX_op_mulu2_i32, { R, R, R, R } },
    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
#endif

#if TCG_TARGET_HAS_not_i32
    { INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
    { INDEX_op_neg_i32, { R, R } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { R, R } },
    { INDEX_op_movi_i64, { R } },

    { INDEX_op_ld8u_i64, { R, R } },
    { INDEX_op_ld8s_i64, { R, R } },
    { INDEX_op_ld16u_i64, { R, R } },
    { INDEX_op_ld16s_i64, { R, R } },
    { INDEX_op_ld32u_i64, { R, R } },
    { INDEX_op_ld32s_i64, { R, R } },
    { INDEX_op_ld_i64, { R, R } },

    { INDEX_op_st8_i64, { R, R } },
    { INDEX_op_st16_i64, { R, R } },
    { INDEX_op_st32_i64, { R, R } },
    { INDEX_op_st_i64, { R, R } },

    { INDEX_op_add_i64, { R, RI, RI } },
    { INDEX_op_sub_i64, { R, RI, RI } },
    { INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
    { INDEX_op_div_i64, { R, R, R } },
    { INDEX_op_divu_i64, { R, R, R } },
    { INDEX_op_rem_i64, { R, R, R } },
    { INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
    { INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
    { INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
    { INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
    { INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
    { INDEX_op_nor_i64, { R, RI, RI } },
#endif
    { INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
    { INDEX_op_orc_i64, { R, RI, RI } },
#endif
    { INDEX_op_xor_i64, { R, RI, RI } },
    { INDEX_op_shl_i64, { R, RI, RI } },
    { INDEX_op_shr_i64, { R, RI, RI } },
    { INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
    { INDEX_op_rotl_i64, { R, RI, RI } },
    { INDEX_op_rotr_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_deposit_i64
    { INDEX_op_deposit_i64, { R, "0", R } },
#endif
    { INDEX_op_brcond_i64, { R, RI } },

#if TCG_TARGET_HAS_ext8s_i64
    { INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
    { INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
    { INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
    { INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
    { INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
    { INDEX_op_ext32u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap16_i64
    { INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
    { INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
    { INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
    { INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
    { INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

    { INDEX_op_qemu_ld8u, { R, L } },
    { INDEX_op_qemu_ld8s, { R, L } },
    { INDEX_op_qemu_ld16u, { R, L } },
    { INDEX_op_qemu_ld16s, { R, L } },
    { INDEX_op_qemu_ld32, { R, L } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld32u, { R, L } },
    { INDEX_op_qemu_ld32s, { R, L } },
#endif
    { INDEX_op_qemu_ld64, { R64, L } },

    { INDEX_op_qemu_st8, { R, S } },
    { INDEX_op_qemu_st16, { R, S } },
    { INDEX_op_qemu_st32, { R, S } },
    { INDEX_op_qemu_st64, { R64, S } },

#if TCG_TARGET_HAS_ext8s_i32
    { INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
    { INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
    { INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
    { INDEX_op_ext16u_i32, { R, R } },
#endif

#if TCG_TARGET_HAS_bswap16_i32
    { INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
    { INDEX_op_bswap32_i32, { R, R } },
#endif

    { -1 },
};
273
/* Preferred order in which the register allocator hands out registers.
   R4 is excluded because it is reserved as TCG_REG_CALL_STACK below. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
#endif
};
296
/* The register list below hard-codes room for exactly 5 input arguments
   (10 registers on a 32 bit host); fail the build if that changes. */
#if MAX_OPC_PARAM_IARGS != 5
# error Fix needed, number of supported input arguments changed!
#endif

/* Registers used to pass input arguments to helper calls. */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
#if TCG_TARGET_REG_BITS == 32
    /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
#else
# error Too few input registers available
#endif
#endif
};
323
/* Registers used to return helper-call results; a 64 bit result on a
   32 bit host uses the pair R0 (low) / R1 (high). */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};
330
#ifndef NDEBUG
/* Human-readable register names for debug output only. */
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
#if TCG_TARGET_NB_REGS >= 16
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
#if TCG_TARGET_NB_REGS >= 32
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
#endif
#endif
};
#endif
371
7316329a 372static void patch_reloc(uint8_t *code_ptr, int type,
2ba7fae2 373 intptr_t value, intptr_t addend)
7316329a
SW
374{
375 /* tcg_out_reloc always uses the same type, addend. */
376 assert(type == sizeof(tcg_target_long));
377 assert(addend == 0);
378 assert(value != 0);
379 *(tcg_target_long *)code_ptr = value;
380}
381
382/* Parse target specific constraints. */
383static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
384{
385 const char *ct_str = *pct_str;
386 switch (ct_str[0]) {
387 case 'r':
388 case 'L': /* qemu_ld constraint */
389 case 'S': /* qemu_st constraint */
390 ct->ct |= TCG_CT_REG;
391 tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1);
392 break;
393 default:
394 return -1;
395 }
396 ct_str++;
397 *pct_str = ct_str;
398 return 0;
399}
400
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter.
   Prints the opcode name and its output/input/constant arg counts
   from the global tcg_op_defs table to stderr. */
void tci_disas(uint8_t opc)
{
    const TCGOpDef *def = &tcg_op_defs[opc];
    fprintf(stderr, "TCG %s %u, %u, %u\n",
            def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
}
#endif
410
411/* Write value (native size). */
412static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
413{
414 *(tcg_target_ulong *)s->code_ptr = v;
415 s->code_ptr += sizeof(tcg_target_ulong);
416}
417
7316329a
SW
/* Write opcode.
   Emits the opcode byte followed by a zero placeholder byte; callers
   patch that second byte with the total instruction length afterwards
   (see old_code_ptr[1] = ... in the emitters below). */
static void tcg_out_op_t(TCGContext *s, TCGOpcode op)
{
    tcg_out8(s, op);
    tcg_out8(s, 0);
}
424
/* Write register.
   Register numbers are encoded in a single byte; assert the index fits. */
static void tcg_out_r(TCGContext *s, TCGArg t0)
{
    assert(t0 < TCG_TARGET_NB_REGS);
    tcg_out8(s, t0);
}
431
432/* Write register or constant (native size). */
433static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
434{
435 if (const_arg) {
436 assert(const_arg == 1);
437 tcg_out8(s, TCG_CONST);
438 tcg_out_i(s, arg);
439 } else {
440 tcg_out_r(s, arg);
441 }
442}
443
444/* Write register or constant (32 bit). */
445static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
446{
447 if (const_arg) {
448 assert(const_arg == 1);
449 tcg_out8(s, TCG_CONST);
450 tcg_out32(s, arg);
451 } else {
452 tcg_out_r(s, arg);
453 }
454}
455
#if TCG_TARGET_REG_BITS == 64
/* Write register or constant (64 bit).  A constant is tagged with the
   TCG_CONST marker byte followed by the 64 bit value. */
static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
{
    if (!const_arg) {
        tcg_out_r(s, arg);
    } else {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out64(s, arg);
    }
}
#endif
469
470/* Write label. */
471static void tci_out_label(TCGContext *s, TCGArg arg)
472{
473 TCGLabel *label = &s->labels[arg];
474 if (label->has_value) {
475 tcg_out_i(s, label->u.value);
476 assert(label->u.value);
477 } else {
478 tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), arg, 0);
3c01ae0e 479 s->code_ptr += sizeof(tcg_target_ulong);
7316329a
SW
480 }
481}
482
/* Load a value of 'type' from host memory at arg1 + arg2 into register
   'ret'.  Encoded as ld_i32/ld_i64 bytecode with a 32 bit offset. */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       intptr_t arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_ld_i32);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_ld_i64);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        /* The offset is truncated to 32 bit; make sure it fits. */
        assert(arg2 == (int32_t)arg2);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    /* Patch the length byte emitted as 0 by tcg_out_op_t(). */
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
506
2a534aff 507static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
7316329a
SW
508{
509 uint8_t *old_code_ptr = s->code_ptr;
510 assert(ret != arg);
511#if TCG_TARGET_REG_BITS == 32
512 tcg_out_op_t(s, INDEX_op_mov_i32);
513#else
514 tcg_out_op_t(s, INDEX_op_mov_i64);
515#endif
516 tcg_out_r(s, ret);
517 tcg_out_r(s, arg);
518 old_code_ptr[1] = s->code_ptr - old_code_ptr;
519}
520
521static void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 522 TCGReg t0, tcg_target_long arg)
7316329a
SW
523{
524 uint8_t *old_code_ptr = s->code_ptr;
525 uint32_t arg32 = arg;
526 if (type == TCG_TYPE_I32 || arg == arg32) {
527 tcg_out_op_t(s, INDEX_op_movi_i32);
528 tcg_out_r(s, t0);
529 tcg_out32(s, arg32);
530 } else {
531 assert(type == TCG_TYPE_I64);
532#if TCG_TARGET_REG_BITS == 64
533 tcg_out_op_t(s, INDEX_op_movi_i64);
534 tcg_out_r(s, t0);
535 tcg_out64(s, arg);
536#else
537 TODO();
538#endif
539 }
540 old_code_ptr[1] = s->code_ptr - old_code_ptr;
541}
542
/* Emit one TCG opcode as interpreter bytecode: the opcode byte, a length
   placeholder, then the operands in the exact order tci.c decodes them.
   The byte at old_code_ptr[1] is patched with the instruction's total
   length at the end. */
static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;

    tcg_out_op_t(s, opc);

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method. */
            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
            /* Remember where the (initially zero) jump target lives so it
               can be patched when the destination TB is known. */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_br:
        tci_out_label(s, args[0]);
        break;
    case INDEX_op_call:
        tcg_out_ri(s, const_args[0], args[0]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_movi_i32:
        TODO(); /* Handled by tcg_out_movi? */
        break;
    /* All loads/stores share the encoding: reg, reg, 32 bit offset. */
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        /* The offset is truncated to 32 bit; make sure it fits. */
        assert(args[2] == (int32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i32:  /* Optional (TCG_TARGET_HAS_deposit_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        /* Deposit position and length are encoded in one byte each. */
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
        TODO();
        break;
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_deposit_i64:  /* Optional (TCG_TARGET_HAS_deposit_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        /* Deposit position and length are encoded in one byte each. */
        assert(args[3] <= UINT8_MAX);
        tcg_out8(s, args[3]);
        assert(args[4] <= UINT8_MAX);
        tcg_out8(s, args[4]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);   /* condition */
        tci_out_label(s, args[5]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    /* qemu_ld/st: value reg(s), address reg(s), then the mem index when
       using the software MMU.  The address needs two registers when the
       guest address is wider than a host register. */
    case INDEX_op_qemu_ld8u:
    case INDEX_op_qemu_ld8s:
    case INDEX_op_qemu_ld16u:
    case INDEX_op_qemu_ld16s:
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st8:
    case INDEX_op_qemu_st16:
    case INDEX_op_qemu_st32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_end:
        TODO();
        break;
    default:
        fprintf(stderr, "Missing: %s\n", tcg_op_defs[opc].name);
        tcg_abort();
    }
    /* Patch the instruction length byte (written as 0 by tcg_out_op_t). */
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
836
2a534aff 837static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
a05b5b9b 838 intptr_t arg2)
7316329a
SW
839{
840 uint8_t *old_code_ptr = s->code_ptr;
841 if (type == TCG_TYPE_I32) {
842 tcg_out_op_t(s, INDEX_op_st_i32);
843 tcg_out_r(s, arg);
844 tcg_out_r(s, arg1);
845 tcg_out32(s, arg2);
846 } else {
847 assert(type == TCG_TYPE_I64);
848#if TCG_TARGET_REG_BITS == 64
849 tcg_out_op_t(s, INDEX_op_st_i64);
850 tcg_out_r(s, arg);
851 tcg_out_r(s, arg1);
852 tcg_out32(s, arg2);
853#else
854 TODO();
855#endif
856 }
857 old_code_ptr[1] = s->code_ptr - old_code_ptr;
858}
859
860/* Test if a constant matches the constraint. */
861static int tcg_target_const_match(tcg_target_long val,
862 const TCGArgConstraint *arg_ct)
863{
864 /* No need to return 0 or 1, 0 or != 0 is good enough. */
865 return arg_ct->ct & TCG_CT_CONST;
866}
867
7316329a
SW
/* One-time backend initialization: register sets, reserved registers,
   operand constraints and the spill frame. */
static void tcg_target_init(TCGContext *s)
{
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
    /* Allow enabling TCG logging via the DEBUG_TCG environment variable. */
    const char *envval = getenv("DEBUG_TCG");
    if (envval) {
        qemu_set_log(strtol(envval, NULL, 0));
    }
#endif

    /* The current code uses uint8_t for tcg operations. */
    assert(ARRAY_SIZE(tcg_op_defs) <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* Registers available for 64 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* TODO: Which registers should be set here? */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_add_target_add_op_defs(tcg_target_op_defs);

    /* We use negative offsets from "sp" so that we can distinguish
       stores that might pretend to be call arguments. */
    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  -CPU_TEMP_BUF_NLONGS * sizeof(long),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}
900
/* Generate global QEMU prologue and epilogue code.
   Intentionally empty: the interpreter has no native prologue/epilogue;
   execution enters the bytecode directly. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}