/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-ldst.h"
#include "qemu/compiler.h"
#include <ffi.h>


/*
 * Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster.
 */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)(cond))
#endif

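/*
 * The TCI-internal "return address": the interpreter stores the current
 * bytecode position here before making a helper call (see the
 * INDEX_op_call handler below), so that helpers can recover a return
 * address into the translation block.
 */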
__thread uintptr_t tci_tb_ptr;

static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    regs[low_index] = (uint32_t)value;
    regs[high_index] = value >> 32;
}

/* Create a 64-bit value from two 32-bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

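/*
 * For illustration: on a 32-bit host a 64-bit value spans a register
 * pair.  tci_write_reg64(regs, 1, 0, 0x1122334455667788ull) leaves
 * regs[0] = 0x55667788 and regs[1] = 0x11223344, and
 * tci_uint64(regs[1], regs[0]) reassembles the original value.
 */
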
/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   b = immediate (bit position)
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   m = immediate (MemOpIdx)
 *   n = immediate (call return length)
 *   r = register
 *   s = signed ldst offset
 */

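/*
 * For illustration, the three-register "rrr" format handled by
 * tci_args_rrr() below packs everything into one 32-bit instruction
 * word:
 *
 *   insn[0..7]    opcode
 *   insn[8..11]   r0
 *   insn[12..15]  r1
 *   insn[16..19]  r2
 *
 * so each decoder is just a few extract32()/sextract32() calls on the
 * same word.
 */
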
static void tci_args_l(uint32_t insn, const void *tb_ptr, void **l0)
{
    int diff = sextract32(insn, 12, 20);
    *l0 = diff ? (void *)tb_ptr + diff : NULL;
}

static void tci_args_r(uint32_t insn, TCGReg *r0)
{
    *r0 = extract32(insn, 8, 4);
}

static void tci_args_nl(uint32_t insn, const void *tb_ptr,
                        uint8_t *n0, void **l1)
{
    *n0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rl(uint32_t insn, const void *tb_ptr,
                        TCGReg *r0, void **l1)
{
    *r0 = extract32(insn, 8, 4);
    *l1 = sextract32(insn, 12, 20) + (void *)tb_ptr;
}

static void tci_args_rr(uint32_t insn, TCGReg *r0, TCGReg *r1)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
}

static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = extract32(insn, 8, 4);
    *i1 = sextract32(insn, 12, 20);
}

static void tci_args_rrm(uint32_t insn, TCGReg *r0,
                         TCGReg *r1, MemOpIdx *m2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *m2 = extract32(insn, 20, 12);
}

static void tci_args_rrr(uint32_t insn, TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
}

static void tci_args_rrs(uint32_t insn, TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = sextract32(insn, 16, 16);
}

static void tci_args_rrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                          uint8_t *i2, uint8_t *i3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *i2 = extract32(insn, 16, 6);
    *i3 = extract32(insn, 22, 6);
}

static void tci_args_rrrc(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *c3 = extract32(insn, 20, 4);
}

static void tci_args_rrrm(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *m3 = extract32(insn, 20, 12);
}

static void tci_args_rrrbb(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *i3 = extract32(insn, 20, 6);
    *i4 = extract32(insn, 26, 6);
}

static void tci_args_rrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                           TCGReg *r2, TCGReg *r3, TCGReg *r4)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
}

static void tci_args_rrrr(uint32_t insn,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
}

static void tci_args_rrrrrc(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *c5 = extract32(insn, 28, 4);
}

static void tci_args_rrrrrr(uint32_t insn, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
    *r0 = extract32(insn, 8, 4);
    *r1 = extract32(insn, 12, 4);
    *r2 = extract32(insn, 16, 4);
    *r3 = extract32(insn, 20, 4);
    *r4 = extract32(insn, 24, 4);
    *r5 = extract32(insn, 28, 4);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

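/*
 * A MemOpIdx packs the MemOp (size, sign and endianness flags) together
 * with the softmmu MMU index.  Only the MemOp part is inspected here
 * via get_memop(); the complete oi is forwarded to the softmmu helpers.
 */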
static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr,
                            MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        return helper_ret_ldub_mmu(env, taddr, oi, ra);
    case MO_SB:
        return helper_ret_ldsb_mmu(env, taddr, oi, ra);
    case MO_LEUW:
        return helper_le_lduw_mmu(env, taddr, oi, ra);
    case MO_LESW:
        return helper_le_ldsw_mmu(env, taddr, oi, ra);
    case MO_LEUL:
        return helper_le_ldul_mmu(env, taddr, oi, ra);
    case MO_LESL:
        return helper_le_ldsl_mmu(env, taddr, oi, ra);
    case MO_LEUQ:
        return helper_le_ldq_mmu(env, taddr, oi, ra);
    case MO_BEUW:
        return helper_be_lduw_mmu(env, taddr, oi, ra);
    case MO_BESW:
        return helper_be_ldsw_mmu(env, taddr, oi, ra);
    case MO_BEUL:
        return helper_be_ldul_mmu(env, taddr, oi, ra);
    case MO_BESL:
        return helper_be_ldsl_mmu(env, taddr, oi, ra);
    case MO_BEUQ:
        return helper_be_ldq_mmu(env, taddr, oi, ra);
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;
    uint64_t ret;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_ld(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SSIZE)) {
    case MO_UB:
        ret = ldub_p(haddr);
        break;
    case MO_SB:
        ret = ldsb_p(haddr);
        break;
    case MO_LEUW:
        ret = lduw_le_p(haddr);
        break;
    case MO_LESW:
        ret = ldsw_le_p(haddr);
        break;
    case MO_LEUL:
        ret = (uint32_t)ldl_le_p(haddr);
        break;
    case MO_LESL:
        ret = (int32_t)ldl_le_p(haddr);
        break;
    case MO_LEUQ:
        ret = ldq_le_p(haddr);
        break;
    case MO_BEUW:
        ret = lduw_be_p(haddr);
        break;
    case MO_BESW:
        ret = ldsw_be_p(haddr);
        break;
    case MO_BEUL:
        ret = (uint32_t)ldl_be_p(haddr);
        break;
    case MO_BESL:
        ret = (int32_t)ldl_be_p(haddr);
        break;
    case MO_BEUQ:
        ret = ldq_be_p(haddr);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
    return ret;
#endif
}

static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val,
                        MemOpIdx oi, const void *tb_ptr)
{
    MemOp mop = get_memop(oi);
    uintptr_t ra = (uintptr_t)tb_ptr;

#ifdef CONFIG_SOFTMMU
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        helper_ret_stb_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUW:
        helper_le_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUL:
        helper_le_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_LEUQ:
        helper_le_stq_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUW:
        helper_be_stw_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUL:
        helper_be_stl_mmu(env, taddr, val, oi, ra);
        break;
    case MO_BEUQ:
        helper_be_stq_mmu(env, taddr, val, oi, ra);
        break;
    default:
        g_assert_not_reached();
    }
#else
    void *haddr = g2h(env_cpu(env), taddr);
    unsigned a_mask = (1u << get_alignment_bits(mop)) - 1;

    set_helper_retaddr(ra);
    if (taddr & a_mask) {
        helper_unaligned_st(env, taddr);
    }
    switch (mop & (MO_BSWAP | MO_SIZE)) {
    case MO_UB:
        stb_p(haddr, val);
        break;
    case MO_LEUW:
        stw_le_p(haddr, val);
        break;
    case MO_LEUL:
        stl_le_p(haddr, val);
        break;
    case MO_LEUQ:
        stq_le_p(haddr, val);
        break;
    case MO_BEUW:
        stw_be_p(haddr, val);
        break;
    case MO_BEUL:
        stl_be_p(haddr, val);
        break;
    case MO_BEUQ:
        stq_be_p(haddr, val);
        break;
    default:
        g_assert_not_reached();
    }
    clear_helper_retaddr();
#endif
}

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
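
/*
 * For example, on a 64-bit host "CASE_32_64(add)" expands to
 *
 *   case INDEX_op_add_i64:
 *   case INDEX_op_add_i32:
 *
 * so a single handler body below serves both operand widths.
 */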

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint32_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    uint64_t stack[(TCG_STATIC_CALL_ARGS_SIZE + TCG_STATIC_FRAME_SIZE)
                   / sizeof(uint64_t)];

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = (uintptr_t)stack;
    tci_assert(tb_ptr);

    for (;;) {
        uint32_t insn;
        TCGOpcode opc;
        TCGReg r0, r1, r2, r3, r4, r5;
        tcg_target_ulong t1;
        TCGCond condition;
        target_ulong taddr;
        uint8_t pos, len;
        uint32_t tmp32;
        uint64_t tmp64;
        uint64_t T1, T2;
        MemOpIdx oi;
        int32_t ofs;
        void *ptr;

        insn = *tb_ptr++;
        opc = extract32(insn, 0, 8);

        switch (opc) {
        case INDEX_op_call:
            {
                void *call_slots[MAX_CALL_IARGS];
                ffi_cif *cif;
                void *func;
                unsigned i, s, n;

                tci_args_nl(insn, tb_ptr, &len, &ptr);
                func = ((void **)ptr)[0];
                cif = ((void **)ptr)[1];

                n = cif->nargs;
                for (i = s = 0; i < n; ++i) {
                    ffi_type *t = cif->arg_types[i];
                    call_slots[i] = &stack[s];
                    s += DIV_ROUND_UP(t->size, 8);
                }
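
                /*
                 * Each argument occupies one or more 8-byte stack slots:
                 * e.g. a helper taking (int32_t, int64_t) is given
                 * stack[0] and stack[1], since DIV_ROUND_UP rounds each
                 * argument size up to whole slots.
                 */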

                /* Helper functions may need to access the "return address" */
                tci_tb_ptr = (uintptr_t)tb_ptr;
                ffi_call(cif, func, stack, call_slots);
            }

            switch (len) {
            case 0: /* void */
                break;
            case 1: /* uint32_t */
                /*
                 * The result winds up "left-aligned" in the stack[0] slot.
                 * Note that libffi has an odd special case in that it will
                 * always widen an integral result to ffi_arg.
                 */
                if (sizeof(ffi_arg) == 8) {
                    regs[TCG_REG_R0] = (uint32_t)stack[0];
                } else {
                    regs[TCG_REG_R0] = *(uint32_t *)stack;
                }
                break;
            case 2: /* uint64_t */
                /*
                 * For TCG_TARGET_REG_BITS == 32, the register pair
                 * must stay in host memory order.
                 */
                memcpy(&regs[TCG_REG_R0], stack, 8);
                break;
            case 3: /* Int128 */
                memcpy(&regs[TCG_REG_R0], stack, 16);
                break;
            default:
                g_assert_not_reached();
            }
            break;

        case INDEX_op_br:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare32(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(insn, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
        case INDEX_op_movcond_i64:
            tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &condition);
            tmp32 = tci_compare64(regs[r1], regs[r2], condition);
            regs[r0] = regs[tmp32 ? r3 : r4];
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi:
            tci_args_ri(insn, &r0, &t1);
            regs[r0] = t1;
            break;
        case INDEX_op_tci_movl:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            regs[r0] = *(tcg_target_ulong *)ptr;
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;
#if TCG_TARGET_HAS_andc_i32 || TCG_TARGET_HAS_andc_i64
        CASE_32_64(andc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] & ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_orc_i32 || TCG_TARGET_HAS_orc_i64
        CASE_32_64(orc)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] | ~regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_eqv_i32 || TCG_TARGET_HAS_eqv_i64
        CASE_32_64(eqv)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] ^ regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nand_i32 || TCG_TARGET_HAS_nand_i64
        CASE_32_64(nand)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] & regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_nor_i32 || TCG_TARGET_HAS_nor_i64
        CASE_32_64(nor)
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ~(regs[r1] | regs[r2]);
            break;
#endif

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i32
        case INDEX_op_clz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? clz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i32
        case INDEX_op_ctz_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            tmp32 = regs[r1];
            regs[r0] = tmp32 ? ctz32(tmp32) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i32
        case INDEX_op_ctpop_i32:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop32(regs[r1]);
            break;
#endif

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit32(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i32
        case INDEX_op_extract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract32(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i32
        case INDEX_op_sextract_i32:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract32(regs[r1], pos, len);
            break;
#endif
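
            /*
             * For the bitfield ops above, for illustration:
             * deposit32(0, 8, 8, 0xab) yields 0x0000ab00,
             * extract32(0x0000ab00, 8, 8) yields 0xab, and sextract32
             * additionally sign-extends the extracted field.
             */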
        case INDEX_op_brcond_i32:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if ((uint32_t)regs[r0]) {
                tb_ptr = ptr;
            }
            break;
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_add2_i32
        case INDEX_op_add2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 + T2);
            break;
#endif
#if TCG_TARGET_REG_BITS == 32 || TCG_TARGET_HAS_sub2_i32
        case INDEX_op_sub2_i32:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = tci_uint64(regs[r3], regs[r2]);
            T2 = tci_uint64(regs[r5], regs[r4]);
            tci_write_reg64(regs, r1, r0, T1 - T2);
            break;
#endif
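
            /*
             * The 32-bit add2/sub2 handlers above synthesize a 64-bit
             * operation from register pairs: tci_uint64() glues each
             * pair into a uint64_t, the arithmetic is done at double
             * width, and tci_write_reg64() splits the result back into
             * a low/high pair.
             */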
#if TCG_TARGET_HAS_mulu2_i32
        case INDEX_op_mulu2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (uint64_t)(uint32_t)regs[r2] * (uint32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i32
        case INDEX_op_muls2_i32:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            tmp64 = (int64_t)(int32_t)regs[r2] * (int32_t)regs[r3];
            tci_write_reg64(regs, r1, r0, tmp64);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64 || \
    TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(ext16s)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(insn, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;
#if TCG_TARGET_HAS_clz_i64
        case INDEX_op_clz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? clz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctz_i64
        case INDEX_op_ctz_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] ? ctz64(regs[r1]) : regs[r2];
            break;
#endif
#if TCG_TARGET_HAS_ctpop_i64
        case INDEX_op_ctpop_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = ctpop64(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_mulu2_i64
        case INDEX_op_mulu2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            mulu64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_muls2_i64
        case INDEX_op_muls2_i64:
            tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
            muls64(&regs[r0], &regs[r1], regs[r2], regs[r3]);
            break;
#endif
#if TCG_TARGET_HAS_add2_i64
        case INDEX_op_add2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] + regs[r4];
            T2 = regs[r3] + regs[r5] + (T1 < regs[r2]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
#if TCG_TARGET_HAS_sub2_i64
        case INDEX_op_sub2_i64:
            tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
            T1 = regs[r2] - regs[r4];
            T2 = regs[r3] - regs[r5] - (regs[r2] < regs[r4]);
            regs[r0] = T1;
            regs[r1] = T2;
            break;
#endif
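
            /*
             * Note how the carry/borrow above is recovered without a
             * wider type: after T1 = regs[r2] + regs[r4], unsigned
             * overflow occurred iff T1 < regs[r2]; likewise
             * regs[r2] < regs[r4] detects the borrow for sub2.
             */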

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(insn, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
            regs[r0] = deposit64(regs[r1], pos, len, regs[r2]);
            break;
#endif
#if TCG_TARGET_HAS_extract_i64
        case INDEX_op_extract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = extract64(regs[r1], pos, len);
            break;
#endif
#if TCG_TARGET_HAS_sextract_i64
        case INDEX_op_sextract_i64:
            tci_args_rrbb(insn, &r0, &r1, &pos, &len);
            regs[r0] = sextract64(regs[r1], pos, len);
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rl(insn, tb_ptr, &r0, &ptr);
            if (regs[r0]) {
                tb_ptr = ptr;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(insn, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            return (uintptr_t)ptr;

        case INDEX_op_goto_tb:
            tci_args_l(insn, tb_ptr, &ptr);
            tb_ptr = *(void **)ptr;
            break;

        case INDEX_op_goto_ptr:
            tci_args_r(insn, &r0);
            ptr = (void *)regs[r0];
            if (!ptr) {
                return 0;
            }
            tb_ptr = ptr;
            break;

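            /*
             * For the guest load/store ops below, the operand count
             * depends on whether the guest address and (for 64-bit data)
             * the data still fit in single host registers; otherwise
             * register pairs are used, and in the largest case the
             * MemOpIdx no longer fits in the insn and is taken from
             * register r4 instead.
             */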
        case INDEX_op_qemu_ld_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            regs[r0] = tmp32;
            break;

        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = regs[r2];
            } else {
                tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                taddr = tci_uint64(regs[r3], regs[r2]);
                oi = regs[r4];
            }
            tmp64 = tci_qemu_ld(env, taddr, oi, tb_ptr);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg64(regs, r1, r0, tmp64);
            } else {
                regs[r0] = tmp64;
            }
            break;

        case INDEX_op_qemu_st_i32:
            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
            } else {
                tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                taddr = tci_uint64(regs[r2], regs[r1]);
            }
            tmp32 = regs[r0];
            tci_qemu_st(env, taddr, tmp32, oi, tb_ptr);
            break;

        case INDEX_op_qemu_st_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                tci_args_rrm(insn, &r0, &r1, &oi);
                taddr = regs[r1];
                tmp64 = regs[r0];
            } else {
                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
                    tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
                    taddr = regs[r2];
                } else {
                    tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
                    taddr = tci_uint64(regs[r3], regs[r2]);
                    oi = regs[r4];
                }
                tmp64 = tci_uint64(regs[r1], regs[r0]);
            }
            tci_qemu_st(env, taddr, tmp64, oi, tb_ptr);
            break;

        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
    }
}

/*
 * Disassembler that matches the interpreter
 */

static const char *str_r(TCGReg r)
{
    static const char regs[TCG_TARGET_NB_REGS][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "env", "sp"
    };

    QEMU_BUILD_BUG_ON(TCG_AREG0 != TCG_REG_R14);
    QEMU_BUILD_BUG_ON(TCG_REG_CALL_STACK != TCG_REG_R15);

    assert((unsigned)r < TCG_TARGET_NB_REGS);
    return regs[r];
}

static const char *str_c(TCGCond c)
{
    static const char cond[16][8] = {
        [TCG_COND_NEVER] = "never",
        [TCG_COND_ALWAYS] = "always",
        [TCG_COND_EQ] = "eq",
        [TCG_COND_NE] = "ne",
        [TCG_COND_LT] = "lt",
        [TCG_COND_GE] = "ge",
        [TCG_COND_LE] = "le",
        [TCG_COND_GT] = "gt",
        [TCG_COND_LTU] = "ltu",
        [TCG_COND_GEU] = "geu",
        [TCG_COND_LEU] = "leu",
        [TCG_COND_GTU] = "gtu",
    };

    assert((unsigned)c < ARRAY_SIZE(cond));
    assert(cond[c][0] != 0);
    return cond[c];
}

/* Disassemble TCI bytecode. */
int print_insn_tci(bfd_vma addr, disassemble_info *info)
{
    const uint32_t *tb_ptr = (const void *)(uintptr_t)addr;
    const TCGOpDef *def;
    const char *op_name;
    uint32_t insn;
    TCGOpcode op;
    TCGReg r0, r1, r2, r3, r4, r5;
    tcg_target_ulong i1;
    int32_t s2;
    TCGCond c;
    MemOpIdx oi;
    uint8_t pos, len;
    void *ptr;

    /* TCI is always the host, so we don't need to load indirect. */
    insn = *tb_ptr++;

    info->fprintf_func(info->stream, "%08x ", insn);

    op = extract32(insn, 0, 8);
    def = &tcg_op_defs[op];
    op_name = def->name;

    switch (op) {
    case INDEX_op_br:
    case INDEX_op_exit_tb:
    case INDEX_op_goto_tb:
        tci_args_l(insn, tb_ptr, &ptr);
        info->fprintf_func(info->stream, "%-12s %p", op_name, ptr);
        break;

    case INDEX_op_goto_ptr:
        tci_args_r(insn, &r0);
        info->fprintf_func(info->stream, "%-12s %s", op_name, str_r(r0));
        break;

    case INDEX_op_call:
        tci_args_nl(insn, tb_ptr, &len, &ptr);
        info->fprintf_func(info->stream, "%-12s %d, %p", op_name, len, ptr);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, 0, ne, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tci_args_rrrc(insn, &r0, &r1, &r2, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2), str_c(c));
        break;

    case INDEX_op_tci_movi:
        tci_args_ri(insn, &r0, &i1);
        info->fprintf_func(info->stream, "%-12s %s, 0x%" TCG_PRIlx,
                           op_name, str_r(r0), i1);
        break;

    case INDEX_op_tci_movl:
        tci_args_rl(insn, tb_ptr, &r0, &ptr);
        info->fprintf_func(info->stream, "%-12s %s, %p",
                           op_name, str_r(r0), ptr);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        tci_args_rrs(insn, &r0, &r1, &s2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %d",
                           op_name, str_r(r0), str_r(r1), s2);
        break;

    case INDEX_op_mov_i32:
    case INDEX_op_mov_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ctpop_i32:
    case INDEX_op_ctpop_i64:
        tci_args_rr(insn, &r0, &r1);
        info->fprintf_func(info->stream, "%-12s %s, %s",
                           op_name, str_r(r0), str_r(r1));
        break;

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i32:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        tci_args_rrr(insn, &r0, &r1, &r2);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2));
        break;

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        tci_args_rrrbb(insn, &r0, &r1, &r2, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %d, %d",
                           op_name, str_r(r0), str_r(r1), str_r(r2), pos, len);
        break;

    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_sextract_i32:
    case INDEX_op_sextract_i64:
        tci_args_rrbb(insn, &r0, &r1, &pos, &len);
        info->fprintf_func(info->stream, "%-12s %s,%s,%d,%d",
                           op_name, str_r(r0), str_r(r1), pos, len);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
    case INDEX_op_setcond2_i32:
        tci_args_rrrrrc(insn, &r0, &r1, &r2, &r3, &r4, &c);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_c(c));
        break;

    case INDEX_op_mulu2_i32:
    case INDEX_op_mulu2_i64:
    case INDEX_op_muls2_i32:
    case INDEX_op_muls2_i64:
        tci_args_rrrr(insn, &r0, &r1, &r2, &r3);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1),
                           str_r(r2), str_r(r3));
        break;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        tci_args_rrrrrr(insn, &r0, &r1, &r2, &r3, &r4, &r5);
        info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s, %s",
                           op_name, str_r(r0), str_r(r1), str_r(r2),
                           str_r(r3), str_r(r4), str_r(r5));
        break;

    case INDEX_op_qemu_ld_i64:
    case INDEX_op_qemu_st_i64:
        len = DIV_ROUND_UP(64, TCG_TARGET_REG_BITS);
        goto do_qemu_ldst;
    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_st_i32:
        len = 1;
    do_qemu_ldst:
        len += DIV_ROUND_UP(TARGET_LONG_BITS, TCG_TARGET_REG_BITS);
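        /*
         * E.g. a 64-bit guest on a 32-bit host: len = 2 (data pair)
         * + 2 (address pair) = 4, selecting the five-register form
         * below, where r4 carries the MemOpIdx.
         */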
        switch (len) {
        case 2:
            tci_args_rrm(insn, &r0, &r1, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), oi);
            break;
        case 3:
            tci_args_rrrm(insn, &r0, &r1, &r2, &oi);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %x",
                               op_name, str_r(r0), str_r(r1), str_r(r2), oi);
            break;
        case 4:
            tci_args_rrrrr(insn, &r0, &r1, &r2, &r3, &r4);
            info->fprintf_func(info->stream, "%-12s %s, %s, %s, %s, %s",
                               op_name, str_r(r0), str_r(r1),
                               str_r(r2), str_r(r3), str_r(r4));
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case 0:
        /* tcg_out_nop_fill uses zeros */
        if (insn == 0) {
            info->fprintf_func(info->stream, "align");
            break;
        }
        /* fall through */

    default:
        info->fprintf_func(info->stream, "illegal opcode %d", op);
        break;
    }

    return sizeof(insn);
}