/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster. */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)0)
#endif

#include "qemu-common.h"
#include "tcg/tcg.h"           /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"

#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
#if TCG_TARGET_REG_BITS == 32
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#endif
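
/*
 * With 32-bit host registers, each 64-bit helper argument occupies two
 * tcg_target_ulong slots, which is why the 32-bit prototype above takes
 * twice as many parameters for the same MAX_OPC_PARAM_IARGS inputs.
 */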

__thread uintptr_t tci_tb_ptr;

static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    return regs[index];
}

#if TCG_TARGET_REG_BITS == 64
static int32_t tci_read_reg32s(const tcg_target_ulong *regs, TCGReg index)
{
    return (int32_t)tci_read_reg(regs, index);
}
#endif

#if TCG_TARGET_REG_BITS == 64
static uint64_t tci_read_reg64(const tcg_target_ulong *regs, TCGReg index)
{
    return tci_read_reg(regs, index);
}
#endif

static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    tci_assert(index != TCG_AREG0);
    tci_assert(index != TCG_REG_CALL_STACK);
    regs[index] = value;
}

#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    tci_write_reg(regs, low_index, value);
    tci_write_reg(regs, high_index, value >> 32);
}
#endif
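
/*
 * On 32-bit hosts a 64-bit value lives in a register pair: the low half in
 * low_index and the high half in high_index, mirroring tci_uint64() below.
 */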

#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
#endif
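
/* For example, tci_uint64(0x00000001, 0x00000002) is 0x0000000100000002. */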

/* Read constant (native size) from bytecode. */
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
{
    tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read unsigned constant (32 bit) from bytecode. */
static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value = *(const uint32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read signed constant (32 bit) from bytecode. */
static int32_t tci_read_s32(const uint8_t **tb_ptr)
{
    int32_t value = *(const int32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

#if TCG_TARGET_REG_BITS == 64
/* Read constant (64 bit) from bytecode. */
static uint64_t tci_read_i64(const uint8_t **tb_ptr)
{
    uint64_t value = *(const uint64_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
#endif

/* Read indexed register (native size) from bytecode. */
static tcg_target_ulong
tci_read_r(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}

#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint32_t low = tci_read_r(regs, tb_ptr);
    return tci_uint64(tci_read_r(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
/* Read indexed register (32 bit signed) from bytecode. */
static int32_t tci_read_r32s(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    int32_t value = tci_read_reg32s(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}

/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint64_t value = tci_read_reg64(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}
#endif

/* Read indexed register(s) with target address from bytecode. */
static target_ulong
tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    target_ulong taddr = tci_read_r(regs, tb_ptr);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    taddr += (uint64_t)tci_read_r(regs, tb_ptr) << 32;
#endif
    return taddr;
}
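
/*
 * When the guest address is wider than a host register (e.g. a 64-bit guest
 * on a 32-bit host), the address is encoded as two register operands and
 * recombined above.
 */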

static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
    tcg_target_ulong label = tci_read_i(tb_ptr);
    tci_assert(label != 0);
    return label;
}
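
/*
 * A label operand holds an absolute host address inside the translated
 * code buffer; a value of 0 would indicate a relocation the translator
 * never resolved, hence the assertion above.
 */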

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
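
/*
 * These macros deliberately pick up env, taddr, oi and tb_ptr from the
 * scope of tcg_qemu_tb_exec() below; the bytecode pointer also serves as
 * the return address used for exception unwinding.
 */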

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
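
/*
 * For example, on a 64-bit host CASE_32_64(ld8u) expands to
 *     case INDEX_op_ld8u_i64: case INDEX_op_ld8u_i32:
 * so a single handler body serves both operand widths.
 */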

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint8_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);

    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        const uint8_t *old_code_ptr = tb_ptr;
#endif
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        tcg_target_ulong label;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        uint64_t v64;
#endif
        TCGMemOpIdx oi;

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_i(&tb_ptr);
            tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5),
                                          tci_read_reg(regs, TCG_REG_R6),
                                          tci_read_reg(regs, TCG_REG_R7),
                                          tci_read_reg(regs, TCG_REG_R8),
                                          tci_read_reg(regs, TCG_REG_R9),
                                          tci_read_reg(regs, TCG_REG_R10),
                                          tci_read_reg(regs, TCG_REG_R11));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
            tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            label = tci_read_label(&tb_ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = (uint8_t *)label;
            continue;
        case INDEX_op_setcond_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            t0 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            tci_write_reg(regs, t0, tci_compare64(t1, t2, condition));
            break;
#endif
        case INDEX_op_mov_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;
        case INDEX_op_tci_movi_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_i32(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

            /* Load/store operations (32 bit). */

        CASE_32_64(ld8u)
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(uint8_t *)(t1 + t2));
            break;
        CASE_32_64(ld8s)
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(int8_t *)(t1 + t2));
            break;
        CASE_32_64(ld16u)
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(uint16_t *)(t1 + t2));
            break;
        CASE_32_64(ld16s)
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(int16_t *)(t1 + t2));
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
            break;
        CASE_32_64(st8)
            t0 = tci_read_r(regs, &tb_ptr);
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint8_t *)(t1 + t2) = t0;
            break;
        CASE_32_64(st16)
            t0 = tci_read_r(regs, &tb_ptr);
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint16_t *)(t1 + t2) = t0;
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            t0 = tci_read_r(regs, &tb_ptr);
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint32_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_add_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 + t2);
            break;
        case INDEX_op_sub_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 - t2);
            break;
        case INDEX_op_mul_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 * t2);
            break;
        case INDEX_op_div_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
            break;
        case INDEX_op_divu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint32_t)t1 / (uint32_t)t2);
            break;
        case INDEX_op_rem_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
            break;
        case INDEX_op_remu_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint32_t)t1 % (uint32_t)t2);
            break;
        case INDEX_op_and_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 & t2);
            break;
        case INDEX_op_or_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 | t2);
            break;
        case INDEX_op_xor_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint32_t)t1 << (t2 & 31));
            break;
        case INDEX_op_shr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint32_t)t1 >> (t2 & 31));
            break;
        case INDEX_op_sar_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int32_t)t1 >> (t2 & 31));
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, rol32(t1, t2 & 31));
            break;
        case INDEX_op_rotr_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, ror32(t1, t2 & 31));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_r(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
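
        /*
         * Deposit example: len tmp8 = 8 and ofs tmp16 = 16 give the mask
         * ((1 << 8) - 1) << 16 = 0x00ff0000, so bits [23:16] of t1 are
         * replaced by the low 8 bits of t2.
         */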
        case INDEX_op_brcond_i32:
            t0 = tci_read_r(regs, &tb_ptr);
            t1 = tci_read_r(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare32(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 += tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 -= tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            v64 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(tmp64, v64, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_r(regs, &tb_ptr);
            tmp64 = (uint32_t)tci_read_r(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32
        case INDEX_op_ext8s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int8_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32
        case INDEX_op_ext16s_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int16_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32
        case INDEX_op_ext8u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint8_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32
        case INDEX_op_ext16u_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint16_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32
        case INDEX_op_bswap16_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32
        case INDEX_op_bswap32_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i32
        case INDEX_op_not_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i32
        case INDEX_op_neg_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, -t1);
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_mov_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;
        case INDEX_op_tci_movi_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_i64(&tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;

            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(int32_t *)(t1 + t2));
            break;
        case INDEX_op_ld_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            tci_write_reg(regs, t0, *(uint64_t *)(t1 + t2));
            break;
        case INDEX_op_st_i64:
            t0 = tci_read_r64(regs, &tb_ptr);
            t1 = tci_read_r(regs, &tb_ptr);
            t2 = tci_read_s32(&tb_ptr);
            *(uint64_t *)(t1 + t2) = t0;
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_add_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 + t2);
            break;
        case INDEX_op_sub_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 - t2);
            break;
        case INDEX_op_mul_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 * t2);
            break;
        case INDEX_op_div_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int64_t)t1 / (int64_t)t2);
            break;
        case INDEX_op_divu_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint64_t)t1 / (uint64_t)t2);
            break;
        case INDEX_op_rem_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int64_t)t1 % (int64_t)t2);
            break;
        case INDEX_op_remu_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint64_t)t1 % (uint64_t)t2);
            break;
        case INDEX_op_and_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 & t2);
            break;
        case INDEX_op_or_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 | t2);
            break;
        case INDEX_op_xor_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 ^ t2);
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 << (t2 & 63));
            break;
        case INDEX_op_shr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1 >> (t2 & 63));
            break;
        case INDEX_op_sar_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, ((int64_t)t1 >> (t2 & 63)));
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, rol64(t1, t2 & 63));
            break;
        case INDEX_op_rotr_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, ror64(t1, t2 & 63));
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            t2 = tci_read_r64(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            t0 = tci_read_r64(regs, &tb_ptr);
            t1 = tci_read_r64(regs, &tb_ptr);
            condition = *tb_ptr++;
            label = tci_read_label(&tb_ptr);
            if (tci_compare64(t0, t1, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = (uint8_t *)label;
                continue;
            }
            break;
#if TCG_TARGET_HAS_ext8u_i64
        case INDEX_op_ext8u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint8_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext8s_i64
        case INDEX_op_ext8s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int8_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i64
        case INDEX_op_ext16s_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (int16_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i64
        case INDEX_op_ext16u_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint16_t)t1);
            break;
#endif
#if TCG_TARGET_HAS_ext32s_i64
        case INDEX_op_ext32s_i64:
#endif
        case INDEX_op_ext_i32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r32s(regs, &tb_ptr);
            tci_write_reg(regs, t0, t1);
            break;
#if TCG_TARGET_HAS_ext32u_i64
        case INDEX_op_ext32u_i64:
#endif
        case INDEX_op_extu_i32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, (uint32_t)t1);
            break;
#if TCG_TARGET_HAS_bswap16_i64
        case INDEX_op_bswap16_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, bswap16(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i64
        case INDEX_op_bswap32_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r(regs, &tb_ptr);
            tci_write_reg(regs, t0, bswap32(t1));
            break;
#endif
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, bswap64(t1));
            break;
#endif
#if TCG_TARGET_HAS_not_i64
        case INDEX_op_not_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, ~t1);
            break;
#endif
#if TCG_TARGET_HAS_neg_i64
        case INDEX_op_neg_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_r64(regs, &tb_ptr);
            tci_write_reg(regs, t0, -t1);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
        case INDEX_op_goto_tb:
            /* Jump address is aligned */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = qatomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
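
        /*
         * Note on INDEX_op_goto_tb above: the 32-bit operand is a relative
         * displacement that TB chaining may patch in place, possibly from
         * another thread, hence the qatomic_read(); a still-unpatched value
         * of 0 simply falls through to the next opcode.
         */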
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp32);
            break;
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(regs, t1, tmp64 >> 32);
            }
            break;
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_r(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}