tcg/tci: Split out tci_args_ri and tci_args_rI (tcg/tci.c)
/*
 * Tiny Code Interpreter for QEMU
 *
 * Copyright (c) 2009, 2011, 2016 Stefan Weil
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

/* Enable TCI assertions only when debugging TCG (and without NDEBUG defined).
 * Without assertions, the interpreter runs much faster. */
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
# define tci_assert(cond) ((void)0)
#endif

#include "qemu-common.h"
#include "tcg/tcg.h"            /* MAX_OPC_PARAM_IARGS */
#include "exec/cpu_ldst.h"
#include "tcg/tcg-op.h"
#include "qemu/compiler.h"

#if MAX_OPC_PARAM_IARGS != 6
# error Fix needed, number of supported input arguments changed!
#endif
#if TCG_TARGET_REG_BITS == 32
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#else
typedef uint64_t (*helper_function)(tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong,
                                    tcg_target_ulong, tcg_target_ulong);
#endif
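/*
 * Why 12 parameters on a 32-bit host: each of the MAX_OPC_PARAM_IARGS == 6
 * helper arguments may be a 64-bit value that occupies two host registers,
 * so the call sequence below passes TCG_REG_R0..TCG_REG_R11 and splits the
 * 64-bit return value back into R0 (low half) and R1 (high half).
 */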

__thread uintptr_t tci_tb_ptr;

static tcg_target_ulong tci_read_reg(const tcg_target_ulong *regs, TCGReg index)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    return regs[index];
}

static void
tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
{
    tci_assert(index < TCG_TARGET_NB_REGS);
    tci_assert(index != TCG_AREG0);
    tci_assert(index != TCG_REG_CALL_STACK);
    regs[index] = value;
}

#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                            uint32_t low_index, uint64_t value)
{
    tci_write_reg(regs, low_index, value);
    tci_write_reg(regs, high_index, value >> 32);
}
#endif

#if TCG_TARGET_REG_BITS == 32
/* Create a 64 bit value from two 32 bit values. */
static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}
#endif
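/*
 * For example, tci_uint64(0x00000001, 0x00000002) == 0x0000000100000002:
 * the high word fills bits 63..32 and the low word bits 31..0.
 */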

/* Read constant byte from bytecode. */
static uint8_t tci_read_b(const uint8_t **tb_ptr)
{
    return *(tb_ptr[0]++);
}

/* Read register number from bytecode. */
static TCGReg tci_read_r(const uint8_t **tb_ptr)
{
    uint8_t regno = tci_read_b(tb_ptr);
    tci_assert(regno < TCG_TARGET_NB_REGS);
    return regno;
}

/* Read constant (native size) from bytecode. */
static tcg_target_ulong tci_read_i(const uint8_t **tb_ptr)
{
    tcg_target_ulong value = *(const tcg_target_ulong *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}
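/*
 * Note, not from the original comments: these wider reads cast the byte
 * stream directly, so they assume the host tolerates unaligned loads;
 * the TCI bytecode generator emits operands back to back without padding
 * them to natural alignment.
 */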

/* Read unsigned constant (32 bit) from bytecode. */
static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value = *(const uint32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read signed constant (32 bit) from bytecode. */
static int32_t tci_read_s32(const uint8_t **tb_ptr)
{
    int32_t value = *(const int32_t *)(*tb_ptr);
    *tb_ptr += sizeof(value);
    return value;
}

/* Read indexed register (native size) from bytecode. */
static tcg_target_ulong
tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
    *tb_ptr += 1;
    return value;
}

#if TCG_TARGET_REG_BITS == 32
/* Read two indexed registers (2 * 32 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    uint32_t low = tci_read_rval(regs, tb_ptr);
    return tci_uint64(tci_read_rval(regs, tb_ptr), low);
}
#elif TCG_TARGET_REG_BITS == 64
/* Read indexed register (64 bit) from bytecode. */
static uint64_t tci_read_r64(const tcg_target_ulong *regs,
                             const uint8_t **tb_ptr)
{
    return tci_read_rval(regs, tb_ptr);
}
#endif

/* Read indexed register(s) with target address from bytecode. */
static target_ulong
tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
{
    target_ulong taddr = tci_read_rval(regs, tb_ptr);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
    taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
#endif
    return taddr;
}
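/*
 * When guest addresses are wider than a host register (a 64-bit guest on
 * a 32-bit host), the address is encoded as two register operands and
 * recombined above as (high << 32) + low.
 */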

static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
{
    tcg_target_ulong label = tci_read_i(tb_ptr);
    tci_assert(label != 0);
    return label;
}

/*
 * Load sets of arguments all at once. The naming convention is:
 *   tci_args_<arguments>
 * where arguments is a sequence of
 *
 *   c = condition (TCGCond)
 *   i = immediate (uint32_t)
 *   I = immediate (tcg_target_ulong)
 *   l = label or pointer
 *   r = register
 *   s = signed ldst offset
 */
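/*
 * For example, INDEX_op_ld8u_i32 is decoded with tci_args_rrs(), so its
 * bytecode layout is
 *
 *     [opcode:1][length:1][r0:1][r1:1][ofs:4, signed]
 *
 * where the interpreter loop consumes the opcode and length bytes before
 * dispatching on the opcode.
 */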

static void tci_args_l(const uint8_t **tb_ptr, void **l0)
{
    *l0 = (void *)tci_read_label(tb_ptr);
}

static void tci_args_rr(const uint8_t **tb_ptr,
                        TCGReg *r0, TCGReg *r1)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
}

static void tci_args_ri(const uint8_t **tb_ptr,
                        TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = tci_read_r(tb_ptr);
    *i1 = tci_read_i32(tb_ptr);
}

#if TCG_TARGET_REG_BITS == 64
static void tci_args_rI(const uint8_t **tb_ptr,
                        TCGReg *r0, tcg_target_ulong *i1)
{
    *r0 = tci_read_r(tb_ptr);
    *i1 = tci_read_i(tb_ptr);
}
#endif

static void tci_args_rrr(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
}

static void tci_args_rrs(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, int32_t *i2)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *i2 = tci_read_s32(tb_ptr);
}

static void tci_args_rrcl(const uint8_t **tb_ptr,
                          TCGReg *r0, TCGReg *r1, TCGCond *c2, void **l3)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *c2 = tci_read_b(tb_ptr);
    *l3 = (void *)tci_read_label(tb_ptr);
}

static void tci_args_rrrc(const uint8_t **tb_ptr,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
    *c3 = tci_read_b(tb_ptr);
}

#if TCG_TARGET_REG_BITS == 32
static void tci_args_rrrrcl(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGCond *c4, void **l5)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
    *r3 = tci_read_r(tb_ptr);
    *c4 = tci_read_b(tb_ptr);
    *l5 = (void *)tci_read_label(tb_ptr);
}

static void tci_args_rrrrrc(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
    *r0 = tci_read_r(tb_ptr);
    *r1 = tci_read_r(tb_ptr);
    *r2 = tci_read_r(tb_ptr);
    *r3 = tci_read_r(tb_ptr);
    *r4 = tci_read_r(tb_ptr);
    *c5 = tci_read_b(tb_ptr);
}
#endif

static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
{
    bool result = false;
    int32_t i0 = u0;
    int32_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}

static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
{
    bool result = false;
    int64_t i0 = u0;
    int64_t i1 = u1;
    switch (condition) {
    case TCG_COND_EQ:
        result = (u0 == u1);
        break;
    case TCG_COND_NE:
        result = (u0 != u1);
        break;
    case TCG_COND_LT:
        result = (i0 < i1);
        break;
    case TCG_COND_GE:
        result = (i0 >= i1);
        break;
    case TCG_COND_LE:
        result = (i0 <= i1);
        break;
    case TCG_COND_GT:
        result = (i0 > i1);
        break;
    case TCG_COND_LTU:
        result = (u0 < u1);
        break;
    case TCG_COND_GEU:
        result = (u0 >= u1);
        break;
    case TCG_COND_LEU:
        result = (u0 <= u1);
        break;
    case TCG_COND_GTU:
        result = (u0 > u1);
        break;
    default:
        g_assert_not_reached();
    }
    return result;
}
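/*
 * Signed and unsigned variants of the same relation really do differ, e.g.
 * tci_compare32(0xffffffff, 0, TCG_COND_LT) is true (-1 < 0 as signed)
 * while tci_compare32(0xffffffff, 0, TCG_COND_LTU) is false.
 */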

#define qemu_ld_ub \
    cpu_ldub_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leuw \
    cpu_lduw_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leul \
    cpu_ldl_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_leq \
    cpu_ldq_le_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beuw \
    cpu_lduw_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beul \
    cpu_ldl_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_ld_beq \
    cpu_ldq_be_mmuidx_ra(env, taddr, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_b(X) \
    cpu_stb_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lew(X) \
    cpu_stw_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_lel(X) \
    cpu_stl_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_leq(X) \
    cpu_stq_le_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bew(X) \
    cpu_stw_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_bel(X) \
    cpu_stl_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)
#define qemu_st_beq(X) \
    cpu_stq_be_mmuidx_ra(env, taddr, X, get_mmuidx(oi), (uintptr_t)tb_ptr)

#if TCG_TARGET_REG_BITS == 64
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i64): \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x) \
        case glue(glue(INDEX_op_, x), _i64):
#else
# define CASE_32_64(x) \
        case glue(glue(INDEX_op_, x), _i32):
# define CASE_64(x)
#endif
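/*
 * The qemu_ld_xxx and qemu_st_xxx macros above expand in place inside the
 * interpreter and pick up env, taddr, oi and tb_ptr from its scope. The
 * CASE macros fold the two operand widths into one switch arm: on a
 * 64-bit host, CASE_32_64(add) expands to
 *
 *     case INDEX_op_add_i64: case INDEX_op_add_i32:
 *
 * while on a 32-bit host only the _i32 case remains.
 */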

/* Interpret pseudo code in tb. */
/*
 * Disable CFI checks.
 * One possible operation in the pseudo code is a call to binary code.
 * Therefore, disable CFI checks in the interpreter function.
 */
uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                                            const void *v_tb_ptr)
{
    const uint8_t *tb_ptr = v_tb_ptr;
    tcg_target_ulong regs[TCG_TARGET_NB_REGS];
    long tcg_temps[CPU_TEMP_BUF_NLONGS];
    uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS);
    uintptr_t ret = 0;

    regs[TCG_AREG0] = (tcg_target_ulong)env;
    regs[TCG_REG_CALL_STACK] = sp_value;
    tci_assert(tb_ptr);

    for (;;) {
        TCGOpcode opc = tb_ptr[0];
#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
        uint8_t op_size = tb_ptr[1];
        const uint8_t *old_code_ptr = tb_ptr;
#endif
        TCGReg r0, r1, r2;
        tcg_target_ulong t0;
        tcg_target_ulong t1;
        tcg_target_ulong t2;
        TCGCond condition;
        target_ulong taddr;
        uint8_t tmp8;
        uint16_t tmp16;
        uint32_t tmp32;
        uint64_t tmp64;
#if TCG_TARGET_REG_BITS == 32
        TCGReg r3, r4;
        uint64_t T1, T2;
#endif
        TCGMemOpIdx oi;
        int32_t ofs;
        void *ptr;

        /* Skip opcode and size entry. */
        tb_ptr += 2;

        switch (opc) {
        case INDEX_op_call:
            t0 = tci_read_i(&tb_ptr);
            tci_tb_ptr = (uintptr_t)tb_ptr;
#if TCG_TARGET_REG_BITS == 32
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5),
                                          tci_read_reg(regs, TCG_REG_R6),
                                          tci_read_reg(regs, TCG_REG_R7),
                                          tci_read_reg(regs, TCG_REG_R8),
                                          tci_read_reg(regs, TCG_REG_R9),
                                          tci_read_reg(regs, TCG_REG_R10),
                                          tci_read_reg(regs, TCG_REG_R11));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
            tci_write_reg(regs, TCG_REG_R1, tmp64 >> 32);
#else
            tmp64 = ((helper_function)t0)(tci_read_reg(regs, TCG_REG_R0),
                                          tci_read_reg(regs, TCG_REG_R1),
                                          tci_read_reg(regs, TCG_REG_R2),
                                          tci_read_reg(regs, TCG_REG_R3),
                                          tci_read_reg(regs, TCG_REG_R4),
                                          tci_read_reg(regs, TCG_REG_R5));
            tci_write_reg(regs, TCG_REG_R0, tmp64);
#endif
            break;
        case INDEX_op_br:
            tci_args_l(&tb_ptr, &ptr);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr = ptr;
            continue;
        case INDEX_op_setcond_i32:
            tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare32(regs[r1], regs[r2], condition);
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_setcond2_i32:
            tci_args_rrrrrc(&tb_ptr, &r0, &r1, &r2, &r3, &r4, &condition);
            T1 = tci_uint64(regs[r2], regs[r1]);
            T2 = tci_uint64(regs[r4], regs[r3]);
            regs[r0] = tci_compare64(T1, T2, condition);
            break;
#elif TCG_TARGET_REG_BITS == 64
        case INDEX_op_setcond_i64:
            tci_args_rrrc(&tb_ptr, &r0, &r1, &r2, &condition);
            regs[r0] = tci_compare64(regs[r1], regs[r2], condition);
            break;
#endif
        CASE_32_64(mov)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = regs[r1];
            break;
        case INDEX_op_tci_movi_i32:
            tci_args_ri(&tb_ptr, &r0, &t1);
            regs[r0] = t1;
            break;

            /* Load/store operations (mixed 32/64 bit). */

        CASE_32_64(ld8u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint8_t *)ptr;
            break;
        CASE_32_64(ld8s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int8_t *)ptr;
            break;
        CASE_32_64(ld16u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint16_t *)ptr;
            break;
        CASE_32_64(ld16s)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int16_t *)ptr;
            break;
        case INDEX_op_ld_i32:
        CASE_64(ld32u)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint32_t *)ptr;
            break;
        CASE_32_64(st8)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint8_t *)ptr = regs[r0];
            break;
        CASE_32_64(st16)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint16_t *)ptr = regs[r0];
            break;
        case INDEX_op_st_i32:
        CASE_64(st32)
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint32_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (mixed 32/64 bit). */

        CASE_32_64(add)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] + regs[r2];
            break;
        CASE_32_64(sub)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] - regs[r2];
            break;
        CASE_32_64(mul)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] * regs[r2];
            break;
        CASE_32_64(and)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] & regs[r2];
            break;
        CASE_32_64(or)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] | regs[r2];
            break;
        CASE_32_64(xor)
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] ^ regs[r2];
            break;

            /* Arithmetic operations (32 bit). */

        case INDEX_op_div_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] / (int32_t)regs[r2];
            break;
        case INDEX_op_divu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] / (uint32_t)regs[r2];
            break;
        case INDEX_op_rem_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] % (int32_t)regs[r2];
            break;
        case INDEX_op_remu_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] % (uint32_t)regs[r2];
            break;

            /* Shift/rotate operations (32 bit). */

        case INDEX_op_shl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] << (regs[r2] & 31);
            break;
        case INDEX_op_shr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint32_t)regs[r1] >> (regs[r2] & 31);
            break;
        case INDEX_op_sar_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int32_t)regs[r1] >> (regs[r2] & 31);
            break;
#if TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol32(regs[r1], regs[r2] & 31);
            break;
        case INDEX_op_rotr_i32:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror32(regs[r1], regs[r2] & 31);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i32
        case INDEX_op_deposit_i32:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp32 = (((1 << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
            break;
#endif
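        /*
         * Worked example for the deposit mask above: inserting an 8-bit
         * field at bit position 8 gives tmp8 = 8 and tmp16 = 8, so
         * tmp32 = ((1 << 8) - 1) << 8 = 0x0000ff00, and bits 15..8 of t1
         * are replaced with the low byte of t2.
         */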
        case INDEX_op_brcond_i32:
            tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
            if (tci_compare32(regs[r0], regs[r1], condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = ptr;
                continue;
            }
            break;
#if TCG_TARGET_REG_BITS == 32
        case INDEX_op_add2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 += tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_sub2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            tmp64 = tci_read_r64(regs, &tb_ptr);
            tmp64 -= tci_read_r64(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, tmp64);
            break;
        case INDEX_op_brcond2_i32:
            tci_args_rrrrcl(&tb_ptr, &r0, &r1, &r2, &r3, &condition, &ptr);
            T1 = tci_uint64(regs[r1], regs[r0]);
            T2 = tci_uint64(regs[r3], regs[r2]);
            if (tci_compare64(T1, T2, condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = ptr;
                continue;
            }
            break;
        case INDEX_op_mulu2_i32:
            t0 = *tb_ptr++;
            t1 = *tb_ptr++;
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp64 = (uint32_t)tci_read_rval(regs, &tb_ptr);
            tci_write_reg64(regs, t1, t0, (uint32_t)t2 * tmp64);
            break;
#endif /* TCG_TARGET_REG_BITS == 32 */
#if TCG_TARGET_HAS_ext8s_i32 || TCG_TARGET_HAS_ext8s_i64
        CASE_32_64(ext8s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16s_i32 || TCG_TARGET_HAS_ext16s_i64
        CASE_32_64(ext16s)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext8u_i32 || TCG_TARGET_HAS_ext8u_i64
        CASE_32_64(ext8u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint8_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_ext16u_i32 || TCG_TARGET_HAS_ext16u_i64
        CASE_32_64(ext16u)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint16_t)regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_bswap16_i32 || TCG_TARGET_HAS_bswap16_i64
        CASE_32_64(bswap16)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap16(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_bswap32_i32 || TCG_TARGET_HAS_bswap32_i64
        CASE_32_64(bswap32)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap32(regs[r1]);
            break;
#endif
#if TCG_TARGET_HAS_not_i32 || TCG_TARGET_HAS_not_i64
        CASE_32_64(not)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = ~regs[r1];
            break;
#endif
#if TCG_TARGET_HAS_neg_i32 || TCG_TARGET_HAS_neg_i64
        CASE_32_64(neg)
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = -regs[r1];
            break;
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_tci_movi_i64:
            tci_args_rI(&tb_ptr, &r0, &t1);
            regs[r0] = t1;
            break;

            /* Load/store operations (64 bit). */

        case INDEX_op_ld32s_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(int32_t *)ptr;
            break;
        case INDEX_op_ld_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            regs[r0] = *(uint64_t *)ptr;
            break;
        case INDEX_op_st_i64:
            tci_args_rrs(&tb_ptr, &r0, &r1, &ofs);
            ptr = (void *)(regs[r1] + ofs);
            *(uint64_t *)ptr = regs[r0];
            break;

            /* Arithmetic operations (64 bit). */

        case INDEX_op_div_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] / (int64_t)regs[r2];
            break;
        case INDEX_op_divu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] / (uint64_t)regs[r2];
            break;
        case INDEX_op_rem_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] % (int64_t)regs[r2];
            break;
        case INDEX_op_remu_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (uint64_t)regs[r1] % (uint64_t)regs[r2];
            break;

            /* Shift/rotate operations (64 bit). */

        case INDEX_op_shl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] << (regs[r2] & 63);
            break;
        case INDEX_op_shr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = regs[r1] >> (regs[r2] & 63);
            break;
        case INDEX_op_sar_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = (int64_t)regs[r1] >> (regs[r2] & 63);
            break;
#if TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = rol64(regs[r1], regs[r2] & 63);
            break;
        case INDEX_op_rotr_i64:
            tci_args_rrr(&tb_ptr, &r0, &r1, &r2);
            regs[r0] = ror64(regs[r1], regs[r2] & 63);
            break;
#endif
#if TCG_TARGET_HAS_deposit_i64
        case INDEX_op_deposit_i64:
            t0 = *tb_ptr++;
            t1 = tci_read_rval(regs, &tb_ptr);
            t2 = tci_read_rval(regs, &tb_ptr);
            tmp16 = *tb_ptr++;
            tmp8 = *tb_ptr++;
            tmp64 = (((1ULL << tmp8) - 1) << tmp16);
            tci_write_reg(regs, t0, (t1 & ~tmp64) | ((t2 << tmp16) & tmp64));
            break;
#endif
        case INDEX_op_brcond_i64:
            tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
            if (tci_compare64(regs[r0], regs[r1], condition)) {
                tci_assert(tb_ptr == old_code_ptr + op_size);
                tb_ptr = ptr;
                continue;
            }
            break;
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (int32_t)regs[r1];
            break;
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = (uint32_t)regs[r1];
            break;
#if TCG_TARGET_HAS_bswap64_i64
        case INDEX_op_bswap64_i64:
            tci_args_rr(&tb_ptr, &r0, &r1);
            regs[r0] = bswap64(regs[r1]);
            break;
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

            /* QEMU specific operations. */

        case INDEX_op_exit_tb:
            ret = *(uint64_t *)tb_ptr;
            goto exit;
        case INDEX_op_goto_tb:
            /* Jump address is aligned. */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
            t0 = qatomic_read((int32_t *)tb_ptr);
            tb_ptr += sizeof(int32_t);
            tci_assert(tb_ptr == old_code_ptr + op_size);
            tb_ptr += (int32_t)t0;
            continue;
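            /*
             * The displacement just consumed implements TB chaining: it is
             * read with qatomic_read() because the TB linking/unlinking
             * code may patch it concurrently from another thread (an
             * assumption from the wider TCG design, not spelled out here);
             * a displacement of 0 simply falls through to the next opcode.
             */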
        case INDEX_op_qemu_ld_i32:
            t0 = *tb_ptr++;
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp32 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp32 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp32 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp32 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp32 = qemu_ld_leul;
                break;
            case MO_BEUW:
                tmp32 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp32 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp32 = qemu_ld_beul;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp32);
            break;
        case INDEX_op_qemu_ld_i64:
            t0 = *tb_ptr++;
            if (TCG_TARGET_REG_BITS == 32) {
                t1 = *tb_ptr++;
            }
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
            case MO_UB:
                tmp64 = qemu_ld_ub;
                break;
            case MO_SB:
                tmp64 = (int8_t)qemu_ld_ub;
                break;
            case MO_LEUW:
                tmp64 = qemu_ld_leuw;
                break;
            case MO_LESW:
                tmp64 = (int16_t)qemu_ld_leuw;
                break;
            case MO_LEUL:
                tmp64 = qemu_ld_leul;
                break;
            case MO_LESL:
                tmp64 = (int32_t)qemu_ld_leul;
                break;
            case MO_LEQ:
                tmp64 = qemu_ld_leq;
                break;
            case MO_BEUW:
                tmp64 = qemu_ld_beuw;
                break;
            case MO_BESW:
                tmp64 = (int16_t)qemu_ld_beuw;
                break;
            case MO_BEUL:
                tmp64 = qemu_ld_beul;
                break;
            case MO_BESL:
                tmp64 = (int32_t)qemu_ld_beul;
                break;
            case MO_BEQ:
                tmp64 = qemu_ld_beq;
                break;
            default:
                g_assert_not_reached();
            }
            tci_write_reg(regs, t0, tmp64);
            if (TCG_TARGET_REG_BITS == 32) {
                tci_write_reg(regs, t1, tmp64 >> 32);
            }
            break;
        case INDEX_op_qemu_st_i32:
            t0 = tci_read_rval(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(t0);
                break;
            case MO_LEUW:
                qemu_st_lew(t0);
                break;
            case MO_LEUL:
                qemu_st_lel(t0);
                break;
            case MO_BEUW:
                qemu_st_bew(t0);
                break;
            case MO_BEUL:
                qemu_st_bel(t0);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_qemu_st_i64:
            tmp64 = tci_read_r64(regs, &tb_ptr);
            taddr = tci_read_ulong(regs, &tb_ptr);
            oi = tci_read_i(&tb_ptr);
            switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
            case MO_UB:
                qemu_st_b(tmp64);
                break;
            case MO_LEUW:
                qemu_st_lew(tmp64);
                break;
            case MO_LEUL:
                qemu_st_lel(tmp64);
                break;
            case MO_LEQ:
                qemu_st_leq(tmp64);
                break;
            case MO_BEUW:
                qemu_st_bew(tmp64);
                break;
            case MO_BEUL:
                qemu_st_bel(tmp64);
                break;
            case MO_BEQ:
                qemu_st_beq(tmp64);
                break;
            default:
                g_assert_not_reached();
            }
            break;
        case INDEX_op_mb:
            /* Ensure ordering for all kinds. */
            smp_mb();
            break;
        default:
            g_assert_not_reached();
        }
        tci_assert(tb_ptr == old_code_ptr + op_size);
    }
exit:
    return ret;
}