/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster. */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)0)
#endif

#include "qemu-common.h"
#include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"

#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
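
/*
 * On a 32-bit host, each (up to) 64-bit helper argument occupies a pair
 * of tcg_target_ulong parameters, so the six input arguments checked
 * above need the twelve parameters of the 32-bit variant below.
 */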
#if TCG_TARGET_REG_BITS == 32
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#endif

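/*
 * Bytecode position of the operation currently executing, published so
 * that helpers have a "return address" to unwind from; in TCI builds
 * GETPC() resolves to this variable.
 */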
__thread uintptr_t tci_tb_ptr;

static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    return regs[index];
}

static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    tci_assert(index != TCG_AREG0);
    tci_assert(index != TCG_REG_CALL_STACK);
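    /*
     * TCG_AREG0 (the env pointer) and the call-stack register are fixed
     * for the lifetime of the interpreter loop; bytecode must never
     * overwrite them (asserted above).
     */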
    regs[index] = value;
}

#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    tci_write_reg(regs, low_index, value);
    tci_write_reg(regs, high_index, value >> 32);
}
#endif

#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
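/* E.g. tci_uint64(0x00000001, 0x00000002) == 0x0000000100000002ULL. */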
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
#endif

/* Read constant byte from bytecode. */
static uint8_t tci_read_b(const uint8_t **tb_ptr)
{
    return *(tb_ptr[0]++);
}

/* Read register number from bytecode. */
static TCGReg tci_read_r(const uint8_t **tb_ptr)
{
    uint8_t regno = tci_read_b(tb_ptr);
    tci_assert(regno < TCG_TARGET_NB_REGS);
    return regno;
}

/* Read constant (native size) from bytecode. */
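/*
 * Note that the immediate is fetched with a plain, possibly unaligned,
 * pointer dereference; this assumes the host tolerates unaligned loads
 * or that the generator keeps immediates suitably aligned.
 */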
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
{
    tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read unsigned constant (32 bit) from bytecode. */
static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value = *(const uint32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read signed constant (32 bit) from bytecode. */
static int32_t tci_read_s32(const uint8_t **tb_ptr)
{
    int32_t value = *(const int32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

#if TCG_TARGET_REG_BITS == 64
/* Read constant (64 bit) from bytecode. */
static uint64_t tci_read_i64(const uint8_t **tb_ptr)
{
    uint64_t value = *(const uint64_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
#endif

/* Read indexed register (native size) from bytecode. */
static tcg_target_ulong
tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}

#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
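/* The low half is encoded first, the high half second (see tci_uint64). */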
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint32_t low = tci_read_rval(regs, tb_ptr);
    return tci_uint64(tci_read_rval(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    return tci_read_rval(regs, tb_ptr);
}
#endif

/* Read indexed register(s) with target address from bytecode. */
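/*
 * If the guest address space is wider than the host registers
 * (a 64-bit guest on a 32-bit host), the address is split across two
 * registers, low half first.
 */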
static target_ulong
tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    target_ulong taddr = tci_read_rval(regs, tb_ptr);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
#endif
    return taddr;
}

static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
    tcg_target_ulong label = tci_read_i(tb_ptr);
    tci_assert(label != 0);
    return label;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   c = condition (TCGCond)
 *   l = label or pointer
 *   r = register
 *   s = signed ldst offset
 */
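
/*
 * For example, an operation taking "rrs" arguments (such as ld_i32) is
 * laid out in the bytecode stream as
 *
 *     [opcode:1][length:1][r0:1][r1:1][offset:4]
 *
 * for a total of 8 bytes: one byte each for the opcode, the operation
 * length, and the two register numbers, then a 32-bit signed offset.
 * The length byte is what the "tb_ptr == old_code_ptr + op_size"
 * assertions in the interpreter loop check against.
 */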

static void tci_args_l(const uint8_t **tb_ptr, void **l0)
{
    *l0 = (void *)tci_read_label(tb_ptr);
}

static void tci_args_rr(const uint8_t **tb_ptr,
                        TCGReg *r0, TCGReg *r1)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
}

static void tci_args_rrr(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
}

static void tci_args_rrs(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *i2 = tci_read_s32(tb_ptr);
}

static void tci_args_rrrc(const uint8_t **tb_ptr,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
    *c3 = tci_read_b(tb_ptr);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

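/*
 * As tci_compare32, but on 64-bit operands.  On a 32-bit host this also
 * serves setcond2_i32 and brcond2_i32, whose operands arrive as 32-bit
 * register pairs.
 */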
static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

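/*
 * Load/store helpers for the qemu_ld/st opcodes.  These macros
 * deliberately rely on the local variables env, taddr, oi and tb_ptr
 * of the interpreter loop below being in scope where they are expanded.
 */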
#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
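
/*
 * For example, on a 64-bit host CASE_32_64(add) expands to
 *
 *     case INDEX_op_add_i64:
 *     case INDEX_op_add_i32:
 *
 * while on a 32-bit host only the _i32 label is emitted.
 */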

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code,
 * so CFI checks must be disabled for the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint8_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);

    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        const uint8_t *old_code_ptr = tb_ptr;
#endif
        TCGReg r0, r1, r2;
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_i(&tb_ptr);
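            /*
             * Record the bytecode position before entering the helper,
             * so that an exception raised inside it can be unwound back
             * to this TB (see tci_tb_ptr above).
             */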
            tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5),
                                          tci_read_reg(regs, TCG_REG_R6),
                                          tci_read_reg(regs, TCG_REG_R7),
                                          tci_read_reg(regs, TCG_REG_R8),
                                          tci_read_reg(regs, TCG_REG_R9),
                                          tci_read_reg(regs, TCG_REG_R10),
                                          tci_read_reg(regs, TCG_REG_R11));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
            tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            tci_args_l(&tb_ptr, &ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

        /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;

        /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;

        /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
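            /*
             * Operands: destination register, source value, value to
             * deposit, then one byte each for position and length.
             * E.g. depositing len=8 bits of t2=0xAB at pos=8 into
             * t1=0x12345678 gives mask 0x0000ff00 and result 0x1234ab78.
             */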
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
        case INDEX_op_brcond_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 += tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 -= tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp64 = (uint32_t)tci_read_rval(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_tci_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

        /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

        /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;

        /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

        /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
        case INDEX_op_goto_tb:
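            /*
             * The in-line 32-bit operand is a relative displacement that
             * TB chaining can patch while another thread is executing,
             * which is why it is read with qatomic_read().
             */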
            /* Jump address is aligned. */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = qatomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp32);
            break;
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(regs, t1, tmp64 >> 32);
            }
            break;
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds of memory barrier. */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}