/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.c.inc"

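/*
 * Map each TCG opcode to its register-constraint set.  TCI is an
 * interpreter and places no restrictions on operand placement, so
 * every operand uses the generic "r" constraint; only the counts of
 * output and input operands differ between the sets below.
 */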
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, r, r, r, r);

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, r, r);
#endif

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        return C_O2_I2(r, r, r, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, r, r);

    case INDEX_op_qemu_ld_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O1_I1(r, r)
                : C_O1_I2(r, r, r));
    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r)
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O2_I1(r, r, r)
                : C_O2_I2(r, r, r, r));
    case INDEX_op_qemu_st_i32:
        return (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
                ? C_O0_I2(r, r)
                : C_O0_I3(r, r, r));
    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r)
                : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? C_O0_I3(r, r, r)
                : C_O0_I4(r, r, r, r));

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R1,
    TCG_REG_R0,
};

/* No call arguments via registers. All will be stored on the "stack". */
static const int tcg_target_call_iarg_regs[] = { };

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};

#ifdef CONFIG_DEBUG_TCG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
};
#endif

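/*
 * The only relocation TCI uses is the 20-bit pc-relative displacement
 * held in the high bits of a 32-bit instruction word (see the encoding
 * comment below).  Returning false when the displacement does not fit
 * lets the caller discard and retry the translation.
 */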
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    intptr_t diff = value - (intptr_t)(code_ptr + 1);

    tcg_debug_assert(addend == 0);
    tcg_debug_assert(type == 20);

    if (diff == sextract32(diff, 0, type)) {
        tcg_patch32(code_ptr, deposit32(*code_ptr, 32 - type, type, diff));
        return true;
    }
    return false;
}

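/*
 * Loads and stores relative to TCG_REG_CALL_STACK must stay inside the
 * fixed frame (call arguments followed by temp storage) reserved by
 * tcg_target_init() below.
 */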
static void stack_bounds_check(TCGReg base, target_long offset)
{
    if (base == TCG_REG_CALL_STACK) {
        tcg_debug_assert(offset >= 0);
        tcg_debug_assert(offset < (TCG_STATIC_CALL_ARGS_SIZE +
                                   TCG_STATIC_FRAME_SIZE));
    }
}

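/*
 * Instruction encoding, as visible in the tcg_out_op_* emitters below:
 * each instruction is one 32-bit word with the opcode in bits 0-7 and
 * the operands packed above it.  Register numbers occupy 4-bit fields
 * starting at bit 8; a 20-bit signed immediate or displacement, when
 * present, fills bits 12-31.  The emitter suffix names the operand
 * list: r = register, i = 20-bit signed immediate, s = 16-bit signed
 * offset, m = 12-bit qemu_ld/st memory-operand index, c = comparison
 * condition, b = 6-bit bit-field position/length, l = label,
 * p = pointer, v = no operands.
 *
 * For example, tcg_out_op_rrr(s, op, r0, r1, r2) emits the word
 *
 *     insn = op | r0 << 8 | r1 << 12 | r2 << 16;
 */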
static void tcg_out_op_l(TCGContext *s, TCGOpcode op, TCGLabel *l0)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l0, 0);
    insn = deposit32(insn, 0, 8, op);
    tcg_out32(s, insn);
}

static void tcg_out_op_p(TCGContext *s, TCGOpcode op, void *p0)
{
    tcg_insn_unit insn = 0;
    intptr_t diff;

    /* Special case for exit_tb: map null -> 0. */
    if (p0 == NULL) {
        diff = 0;
    } else {
        diff = p0 - (void *)(s->code_ptr + 1);
        tcg_debug_assert(diff != 0);
        if (diff != sextract32(diff, 0, 20)) {
            tcg_raise_tb_overflow(s);
        }
    }
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 12, 20, diff);
    tcg_out32(s, insn);
}

static void tcg_out_op_r(TCGContext *s, TCGOpcode op, TCGReg r0)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_v(TCGContext *s, TCGOpcode op)
{
    tcg_out32(s, (uint8_t)op);
}

static void tcg_out_op_ri(TCGContext *s, TCGOpcode op, TCGReg r0, int32_t i1)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i1 == sextract32(i1, 0, 20));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 20, i1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rl(TCGContext *s, TCGOpcode op, TCGReg r0, TCGLabel *l1)
{
    tcg_insn_unit insn = 0;

    tcg_out_reloc(s, s->code_ptr, 20, l1, 0);
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    tcg_out32(s, insn);
}

static void tcg_out_op_rr(TCGContext *s, TCGOpcode op, TCGReg r0, TCGReg r1)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrm(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGArg m2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m2 == extract32(m2, 0, 12));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 20, 12, m2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrr(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, TCGReg r2)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrs(TCGContext *s, TCGOpcode op,
                           TCGReg r0, TCGReg r1, intptr_t i2)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(i2 == sextract32(i2, 0, 16));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 16, i2);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                            TCGReg r1, uint8_t b2, uint8_t b3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b2 == extract32(b2, 0, 6));
    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 6, b2);
    insn = deposit32(insn, 22, 6, b3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrc(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGCond c3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, c3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrm(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGArg m3)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(m3 == extract32(m3, 0, 12));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 12, m3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrbb(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, uint8_t b3, uint8_t b4)
{
    tcg_insn_unit insn = 0;

    tcg_debug_assert(b3 == extract32(b3, 0, 6));
    tcg_debug_assert(b4 == extract32(b4, 0, 6));
    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 6, b3);
    insn = deposit32(insn, 26, 6, b4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrr(TCGContext *s, TCGOpcode op, TCGReg r0,
                             TCGReg r1, TCGReg r2, TCGReg r3, TCGReg r4)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrr(TCGContext *s, TCGOpcode op,
                            TCGReg r0, TCGReg r1, TCGReg r2, TCGReg r3)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrc(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGCond c5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, c5);
    tcg_out32(s, insn);
}

static void tcg_out_op_rrrrrr(TCGContext *s, TCGOpcode op,
                              TCGReg r0, TCGReg r1, TCGReg r2,
                              TCGReg r3, TCGReg r4, TCGReg r5)
{
    tcg_insn_unit insn = 0;

    insn = deposit32(insn, 0, 8, op);
    insn = deposit32(insn, 8, 4, r0);
    insn = deposit32(insn, 12, 4, r1);
    insn = deposit32(insn, 16, 4, r2);
    insn = deposit32(insn, 20, 4, r3);
    insn = deposit32(insn, 24, 4, r4);
    insn = deposit32(insn, 28, 4, r5);
    tcg_out32(s, insn);
}

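/*
 * Emit a load or store.  The 16-bit offset field only covers part of
 * the address space, so a larger offset is first materialized in
 * TCG_REG_TMP and folded into the base register.
 */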
static void tcg_out_ldst(TCGContext *s, TCGOpcode op, TCGReg val,
                         TCGReg base, intptr_t offset)
{
    stack_bounds_check(base, offset);
    if (offset != sextract32(offset, 0, 16)) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, offset);
        tcg_out_op_rrr(s, (TCG_TARGET_REG_BITS == 32
                           ? INDEX_op_add_i32 : INDEX_op_add_i64),
                       TCG_REG_TMP, TCG_REG_TMP, base);
        base = TCG_REG_TMP;
        offset = 0;
    }
    tcg_out_op_rrs(s, op, val, base, offset);
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_ld_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_ld_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_op_rr(s, INDEX_op_mov_i32, ret, arg);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_op_rr(s, INDEX_op_mov_i64, ret, arg);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return true;
}

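/*
 * Load a constant.  Values that fit in the 20-bit immediate field use
 * the single-word tci_movi form; anything wider goes into the constant
 * pool and is loaded with tci_movl via a pc-relative reference.
 */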
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
#if TCG_TARGET_REG_BITS == 64
        arg = (int32_t)arg;
        /* fall through */
    case TCG_TYPE_I64:
#endif
        break;
    default:
        g_assert_not_reached();
    }

    if (arg == sextract32(arg, 0, 20)) {
        tcg_out_op_ri(s, INDEX_op_tci_movi, ret, arg);
    } else {
        tcg_insn_unit insn = 0;

        new_pool_label(s, arg, 20, s->code_ptr, 0);
        insn = deposit32(insn, 0, 8, INDEX_op_tci_movl);
        insn = deposit32(insn, 8, 4, ret);
        tcg_out32(s, insn);
    }
}

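/*
 * Helper calls go through libffi: the function pointer and its
 * prebuilt ffi_cif descriptor are stored as a pair in the constant
 * pool, and the 4-bit "which" field tells the interpreter how wide a
 * return value to copy back (0 = void, 1 = 32-bit, 2 = 64-bit).
 */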
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *func,
                         const TCGHelperInfo *info)
{
    ffi_cif *cif = info->cif;
    tcg_insn_unit insn = 0;
    uint8_t which;

    if (cif->rtype == &ffi_type_void) {
        which = 0;
    } else if (cif->rtype->size == 4) {
        which = 1;
    } else {
        tcg_debug_assert(cif->rtype->size == 8);
        which = 2;
    }
    new_pool_l2(s, 20, s->code_ptr, 0, (uintptr_t)func, (uintptr_t)cif);
    insn = deposit32(insn, 0, 8, INDEX_op_call);
    insn = deposit32(insn, 8, 4, which);
    tcg_out32(s, insn);
}

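/*
 * Expand one case label into both the _i32 and _i64 variants of an
 * opcode on 64-bit hosts, or just the _i32 variant on 32-bit hosts.
 */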
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_op_p(s, INDEX_op_exit_tb, (void *)arg);
}

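/*
 * TCI cannot patch jump instructions in place, so chained TBs always
 * use the indirect jump method: the instruction references a jump
 * table slot that is rewritten when the destination TB is (un)linked.
 */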
static void tcg_out_goto_tb(TCGContext *s, int which)
{
    qemu_build_assert(!TCG_TARGET_HAS_direct_jump);
    /* indirect jump method. */
    tcg_out_op_p(s, INDEX_op_goto_tb, (void *)get_jmp_target_addr(s, which));
    set_jmp_reset_offset(s, which);
}

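/*
 * Emit one TCG op, dispatching to whichever encoding helper above
 * matches its operand signature.
 */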
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGOpcode exts;

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_op_r(s, opc, args[0]);
        break;

    case INDEX_op_br:
        tcg_out_op_l(s, opc, arg_label(args[0]));
        break;

    CASE_32_64(setcond)
        tcg_out_op_rrrc(s, opc, args[0], args[1], args[2], args[3]);
        break;

    CASE_32_64(movcond)
    case INDEX_op_setcond2_i32:
        tcg_out_op_rrrrrc(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

    CASE_32_64(ld8u)
    CASE_32_64(ld8s)
    CASE_32_64(ld16u)
    CASE_32_64(ld16s)
    case INDEX_op_ld_i32:
    CASE_64(ld32u)
    CASE_64(ld32s)
    CASE_64(ld)
    CASE_32_64(st8)
    CASE_32_64(st16)
    case INDEX_op_st_i32:
    CASE_64(st32)
    CASE_64(st)
        tcg_out_ldst(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(add)
    CASE_32_64(sub)
    CASE_32_64(mul)
    CASE_32_64(and)
    CASE_32_64(or)
    CASE_32_64(xor)
    CASE_32_64(andc)     /* Optional (TCG_TARGET_HAS_andc_*). */
    CASE_32_64(orc)      /* Optional (TCG_TARGET_HAS_orc_*). */
    CASE_32_64(eqv)      /* Optional (TCG_TARGET_HAS_eqv_*). */
    CASE_32_64(nand)     /* Optional (TCG_TARGET_HAS_nand_*). */
    CASE_32_64(nor)      /* Optional (TCG_TARGET_HAS_nor_*). */
    CASE_32_64(shl)
    CASE_32_64(shr)
    CASE_32_64(sar)
    CASE_32_64(rotl)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(rotr)     /* Optional (TCG_TARGET_HAS_rot_*). */
    CASE_32_64(div)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(divu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(rem)      /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(remu)     /* Optional (TCG_TARGET_HAS_div_*). */
    CASE_32_64(clz)      /* Optional (TCG_TARGET_HAS_clz_*). */
    CASE_32_64(ctz)      /* Optional (TCG_TARGET_HAS_ctz_*). */
        tcg_out_op_rrr(s, opc, args[0], args[1], args[2]);
        break;

    CASE_32_64(deposit)  /* Optional (TCG_TARGET_HAS_deposit_*). */
        {
            TCGArg pos = args[3], len = args[4];
            TCGArg max = opc == INDEX_op_deposit_i32 ? 32 : 64;

            tcg_debug_assert(pos < max);
            tcg_debug_assert(pos + len <= max);

            tcg_out_op_rrrbb(s, opc, args[0], args[1], args[2], pos, len);
        }
        break;

    CASE_32_64(extract)  /* Optional (TCG_TARGET_HAS_extract_*). */
    CASE_32_64(sextract) /* Optional (TCG_TARGET_HAS_sextract_*). */
        {
            TCGArg pos = args[2], len = args[3];
            TCGArg max = tcg_op_defs[opc].flags & TCG_OPF_64BIT ? 64 : 32;

            tcg_debug_assert(pos < max);
            tcg_debug_assert(pos + len <= max);

            tcg_out_op_rrbb(s, opc, args[0], args[1], pos, len);
        }
        break;

    CASE_32_64(brcond)
        tcg_out_op_rrrc(s, (opc == INDEX_op_brcond_i32
                            ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64),
                        TCG_REG_TMP, args[0], args[1], args[2]);
        tcg_out_op_rl(s, opc, TCG_REG_TMP, arg_label(args[3]));
        break;

    CASE_32_64(neg)      /* Optional (TCG_TARGET_HAS_neg_*). */
    CASE_32_64(not)      /* Optional (TCG_TARGET_HAS_not_*). */
    CASE_32_64(ext8s)    /* Optional (TCG_TARGET_HAS_ext8s_*). */
    CASE_32_64(ext8u)    /* Optional (TCG_TARGET_HAS_ext8u_*). */
    CASE_32_64(ext16s)   /* Optional (TCG_TARGET_HAS_ext16s_*). */
    CASE_32_64(ext16u)   /* Optional (TCG_TARGET_HAS_ext16u_*). */
    CASE_64(ext32s)      /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    CASE_64(ext32u)      /* Optional (TCG_TARGET_HAS_ext32u_i64). */
    CASE_64(ext_i32)
    CASE_64(extu_i32)
    CASE_32_64(ctpop)    /* Optional (TCG_TARGET_HAS_ctpop_*). */
    case INDEX_op_bswap32_i32: /* Optional (TCG_TARGET_HAS_bswap32_i32). */
    case INDEX_op_bswap64_i64: /* Optional (TCG_TARGET_HAS_bswap64_i64). */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        break;

    case INDEX_op_bswap16_i32: /* Optional (TCG_TARGET_HAS_bswap16_i32). */
        exts = INDEX_op_ext16s_i32;
        goto do_bswap;
    case INDEX_op_bswap16_i64: /* Optional (TCG_TARGET_HAS_bswap16_i64). */
        exts = INDEX_op_ext16s_i64;
        goto do_bswap;
    case INDEX_op_bswap32_i64: /* Optional (TCG_TARGET_HAS_bswap32_i64). */
        exts = INDEX_op_ext32s_i64;
    do_bswap:
        /* The base tci bswaps zero-extend, and ignore high bits. */
        tcg_out_op_rr(s, opc, args[0], args[1]);
        if (args[2] & TCG_BSWAP_OS) {
            tcg_out_op_rr(s, exts, args[0], args[0]);
        }
        break;

    CASE_32_64(add2)
    CASE_32_64(sub2)
        tcg_out_op_rrrrrr(s, opc, args[0], args[1], args[2],
                          args[3], args[4], args[5]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_op_rrrrrc(s, INDEX_op_setcond2_i32, TCG_REG_TMP,
                          args[0], args[1], args[2], args[3], args[4]);
        tcg_out_op_rl(s, INDEX_op_brcond_i32, TCG_REG_TMP, arg_label(args[5]));
        break;
#endif

    CASE_32_64(mulu2)
    CASE_32_64(muls2)
        tcg_out_op_rrrr(s, opc, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        } else {
            tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
        }
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_op_rrm(s, opc, args[0], args[1], args[2]);
        } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
            tcg_out_op_rrrm(s, opc, args[0], args[1], args[2], args[3]);
        } else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, args[4]);
            tcg_out_op_rrrrr(s, opc, args[0], args[1],
                             args[2], args[3], TCG_REG_TMP);
        }
        break;

    case INDEX_op_mb:
        tcg_out_op_v(s, opc);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    default:
        tcg_abort();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg val, TCGReg base,
                       intptr_t offset)
{
    switch (type) {
    case TCG_TYPE_I32:
        tcg_out_ldst(s, INDEX_op_st_i32, val, base, offset);
        break;
#if TCG_TARGET_REG_BITS == 64
    case TCG_TYPE_I64:
        tcg_out_ldst(s, INDEX_op_st_i64, val, base, offset);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

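/*
 * TCI has no store-immediate form; returning false makes the generic
 * code load the constant into a register before storing it.
 */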
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* Test if a constant matches the constraint. */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    return ct & TCG_CT_CONST;
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0, sizeof(*p) * count);
}

static void tcg_target_init(TCGContext *s)
{
    /* The current code uses uint8_t for tcg operations. */
    tcg_debug_assert(tcg_op_defs_max <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I32] = BIT(TCG_TARGET_NB_REGS) - 1;
    /* Registers available for 64 bit operations. */
    tcg_target_available_regs[TCG_TYPE_I64] = BIT(TCG_TARGET_NB_REGS) - 1;
    /*
     * The interpreter "registers" are in the local stack frame and
     * cannot be clobbered by the called helper functions.  However,
     * the interpreter assumes a 64-bit return value and assigns to
     * the return value registers.
     */
    tcg_target_call_clobber_regs =
        MAKE_64BIT_MASK(TCG_REG_R0, 64 / TCG_TARGET_REG_BITS);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);

    /* The call arguments come first, followed by the temp storage. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
                  TCG_STATIC_FRAME_SIZE);
}

/* Generate global QEMU prologue and epilogue code. */
static inline void tcg_target_qemu_prologue(TCGContext *s)
{
}