/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster. */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)0)
#endif

#include "qemu-common.h"
#include "tcg/tcg.h"           /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"

#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
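
/*
 * Helper calls pass at most MAX_OPC_PARAM_IARGS (6) input arguments.  On
 * 32-bit hosts a 64-bit argument occupies two registers, so the generic
 * helper signature below takes twice as many tcg_target_ulong parameters.
 */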
#if TCG_TARGET_REG_BITS == 32
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#endif

__thread uintptr_t tci_tb_ptr;

static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    return regs[index];
}

static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    tci_assert(index != TCG_AREG0);
    tci_assert(index != TCG_REG_CALL_STACK);
    regs[index] = value;
}

#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    tci_write_reg(regs, low_index, value);
    tci_write_reg(regs, high_index, value >> 32);
}
#endif

#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
#endif

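/*
 * Bytecode operand readers.  Operands are stored in the instruction stream
 * in the order they are consumed: register numbers as single bytes,
 * immediates as native-word, 32-bit or 64-bit values, and load/store
 * offsets as signed 32-bit values.
 */
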
/* Read constant byte from bytecode. */
static uint8_t tci_read_b(const uint8_t **tb_ptr)
{
    return *(tb_ptr[0]++);
}

/* Read register number from bytecode. */
static TCGReg tci_read_r(const uint8_t **tb_ptr)
{
    uint8_t regno = tci_read_b(tb_ptr);
    tci_assert(regno < TCG_TARGET_NB_REGS);
    return regno;
}

/* Read constant (native size) from bytecode. */
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
{
    tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read unsigned constant (32 bit) from bytecode. */
static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value = *(const uint32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read signed constant (32 bit) from bytecode. */
static int32_t tci_read_s32(const uint8_t **tb_ptr)
{
    int32_t value = *(const int32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

#if TCG_TARGET_REG_BITS == 64
/* Read constant (64 bit) from bytecode. */
static uint64_t tci_read_i64(const uint8_t **tb_ptr)
{
    uint64_t value = *(const uint64_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
#endif

/* Read indexed register (native size) from bytecode. */
static tcg_target_ulong
tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}

#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint32_t low = tci_read_rval(regs, tb_ptr);
    return tci_uint64(tci_read_rval(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    return tci_read_rval(regs, tb_ptr);
}
#endif

/* Read indexed register(s) with target address from bytecode. */
static target_ulong
tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    target_ulong taddr = tci_read_rval(regs, tb_ptr);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
#endif
    return taddr;
}

static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
    tcg_target_ulong label = tci_read_i(tb_ptr);
    tci_assert(label != 0);
    return label;
}

/*
 * Load sets of arguments all at once.  The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   r = register
 *   s = signed ldst offset
 */

static void tci_args_rr(const uint8_t **tb_ptr,
                        TCGReg *r0, TCGReg *r1)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
}

static void tci_args_rrr(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
}

static void tci_args_rrs(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *i2 = tci_read_s32(tb_ptr);
}

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
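
/*
 * These load/store macros are only used inside tcg_qemu_tb_exec() below and
 * rely on the local variables 'env', 'taddr', 'oi' and 'tb_ptr' being in
 * scope at the point of use.
 */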
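/*
 * CASE_32_64(x) expands to the case labels for both the 32-bit and 64-bit
 * variants of operation x on 64-bit hosts, and to the 32-bit label only on
 * 32-bit hosts; CASE_64(x) expands to the 64-bit label or to nothing.
 */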
#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint8_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);

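    /*
     * Each instruction starts with a one-byte TCGOpcode and a one-byte
     * total instruction length (used only for the assertions below),
     * followed by the operands decoded by the tci_read_*() helpers.
     */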
    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        const uint8_t *old_code_ptr = tb_ptr;
#endif
        TCGReg r0, r1, r2;
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_i(&tb_ptr);
            tci_tb_ptr = (uintptr_t)tb_ptr;
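            /*
             * On 32-bit hosts the 64-bit return value of the helper is
             * split across TCG_REG_R0 (low part) and TCG_REG_R1 (high part).
             */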
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5),
                                          tci_read_reg(regs, TCG_REG_R6),
                                          tci_read_reg(regs, TCG_REG_R7),
                                          tci_read_reg(regs, TCG_REG_R8),
                                          tci_read_reg(regs, TCG_REG_R9),
                                          tci_read_reg(regs, TCG_REG_R10),
                                          tci_read_reg(regs, TCG_REG_R11));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
            tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            label = tci_read_label(&tb_ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = (uint8_t *)label;
            continue;
        case INDEX_op_setcond_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
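        /*
         * deposit dst, t1, t2, pos, len: tmp16 is the bit position and
         * tmp8 the field length; the len-bit field from t2 is inserted
         * into t1 at position pos and the result written to dst.
         */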
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
        case INDEX_op_brcond_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 += tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 -= tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp64 = (uint32_t)tci_read_rval(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_tci_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
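        /* Same decoding as deposit_i32 above, with a 64-bit field mask. */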
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_rval(regs, &tb_ptr);
            t1 = tci_read_rval(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
        case INDEX_op_goto_tb:
            /* Jump address is aligned */
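            /*
             * The displacement is read atomically because TB chaining may
             * retarget this jump from another thread while we execute.
             */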
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = qatomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
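            /*
             * oi packs the MemOp (size, sign, endianness) together with the
             * mmu index; see get_memop() and get_mmuidx().
             */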
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp32);
            break;
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(regs, t1, tmp64 >> 32);
            }
            break;
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}